Compare commits

No commits in common. "docs" and "master" have entirely different histories.
docs ... master

1130 changed files with 95653 additions and 2396 deletions

36 .devcontainer/devcontainer.json Normal file

@@ -0,0 +1,36 @@
{
"name": "Java",
"image": "mcr.microsoft.com/devcontainers/java:0-17",
"features": {
"ghcr.io/devcontainers/features/java:1": {
"version": "none",
"installMaven": "true",
"installGradle": "false"
},
"ghcr.io/devcontainers/features/docker-in-docker:2": {}
},
// Use 'forwardPorts' to make a list of ports inside the container available locally.
// "forwardPorts": [],
// Use 'postCreateCommand' to run commands after the container is created.
// "postCreateCommand": "java -version",
"customizations": {
"vscode": {
"extensions" : [
"vscjava.vscode-java-pack",
"vscjava.vscode-maven",
"vscjava.vscode-java-debug",
"EditorConfig.EditorConfig",
"ms-azuretools.vscode-docker",
"antfu.vite",
"ms-kubernetes-tools.vscode-kubernetes-tools",
"github.vscode-pull-request-github"
]
}
}
}
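
For local use, the same container can be built and entered without VS Code. A minimal sketch, assuming the Dev Containers CLI (@devcontainers/cli) is installed:

npm install -g @devcontainers/cli
devcontainer up --workspace-folder .                    # build the image and start the container
devcontainer exec --workspace-folder . java -version    # run a command inside it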

286 .editorconfig Normal file

@@ -0,0 +1,286 @@
[*]
charset = utf-8
end_of_line = lf
indent_size = 4
indent_style = space
insert_final_newline = true
max_line_length = 120
tab_width = 4
ij_continuation_indent_size = 8
ij_formatter_off_tag = @formatter:off
ij_formatter_on_tag = @formatter:on
ij_formatter_tags_enabled = true
ij_smart_tabs = false
ij_visual_guides = none
ij_wrap_on_typing = false
trim_trailing_whitespace = true
[*.java]
indent_size = 2
ij_continuation_indent_size = 4
ij_java_align_consecutive_assignments = false
ij_java_align_consecutive_variable_declarations = false
ij_java_align_group_field_declarations = false
ij_java_align_multiline_annotation_parameters = false
ij_java_align_multiline_array_initializer_expression = false
ij_java_align_multiline_assignment = false
ij_java_align_multiline_binary_operation = false
ij_java_align_multiline_chained_methods = false
ij_java_align_multiline_extends_list = false
ij_java_align_multiline_for = true
ij_java_align_multiline_method_parentheses = false
ij_java_align_multiline_parameters = true
ij_java_align_multiline_parameters_in_calls = false
ij_java_align_multiline_parenthesized_expression = false
ij_java_align_multiline_records = true
ij_java_align_multiline_resources = true
ij_java_align_multiline_ternary_operation = false
ij_java_align_multiline_text_blocks = false
ij_java_align_multiline_throws_list = false
ij_java_align_subsequent_simple_methods = false
ij_java_align_throws_keyword = false
ij_java_align_types_in_multi_catch = true
ij_java_annotation_parameter_wrap = off
ij_java_array_initializer_new_line_after_left_brace = false
ij_java_array_initializer_right_brace_on_new_line = false
ij_java_array_initializer_wrap = normal
ij_java_assert_statement_colon_on_next_line = false
ij_java_assert_statement_wrap = normal
ij_java_assignment_wrap = normal
ij_java_binary_operation_sign_on_next_line = false
ij_java_binary_operation_wrap = normal
ij_java_blank_lines_after_anonymous_class_header = 0
ij_java_blank_lines_after_class_header = 0
ij_java_blank_lines_after_imports = 1
ij_java_blank_lines_after_package = 1
ij_java_blank_lines_around_class = 1
ij_java_blank_lines_around_field = 0
ij_java_blank_lines_around_field_in_interface = 0
ij_java_blank_lines_around_initializer = 1
ij_java_blank_lines_around_method = 1
ij_java_blank_lines_around_method_in_interface = 1
ij_java_blank_lines_before_class_end = 0
ij_java_blank_lines_before_imports = 1
ij_java_blank_lines_before_method_body = 0
ij_java_blank_lines_before_package = 1
ij_java_block_brace_style = end_of_line
ij_java_block_comment_add_space = false
ij_java_block_comment_at_first_column = true
ij_java_builder_methods = none
ij_java_call_parameters_new_line_after_left_paren = false
ij_java_call_parameters_right_paren_on_new_line = false
ij_java_call_parameters_wrap = normal
ij_java_case_statement_on_separate_line = true
ij_java_catch_on_new_line = false
ij_java_class_annotation_wrap = split_into_lines
ij_java_class_brace_style = end_of_line
ij_java_class_count_to_use_import_on_demand = 999
ij_java_class_names_in_javadoc = 1
ij_java_do_not_indent_top_level_class_members = false
ij_java_do_not_wrap_after_single_annotation = false
ij_java_do_not_wrap_after_single_annotation_in_parameter = false
ij_java_do_while_brace_force = always
ij_java_doc_add_blank_line_after_description = true
ij_java_doc_add_blank_line_after_param_comments = false
ij_java_doc_add_blank_line_after_return = false
ij_java_doc_add_p_tag_on_empty_lines = true
ij_java_doc_align_exception_comments = true
ij_java_doc_align_param_comments = true
ij_java_doc_do_not_wrap_if_one_line = false
ij_java_doc_enable_formatting = true
ij_java_doc_enable_leading_asterisks = true
ij_java_doc_indent_on_continuation = false
ij_java_doc_keep_empty_lines = true
ij_java_doc_keep_empty_parameter_tag = true
ij_java_doc_keep_empty_return_tag = true
ij_java_doc_keep_empty_throws_tag = true
ij_java_doc_keep_invalid_tags = true
ij_java_doc_param_description_on_new_line = false
ij_java_doc_preserve_line_breaks = false
ij_java_doc_use_throws_not_exception_tag = true
ij_java_else_on_new_line = false
ij_java_entity_dd_suffix = EJB
ij_java_entity_eb_suffix = Bean
ij_java_entity_hi_suffix = Home
ij_java_entity_lhi_prefix = Local
ij_java_entity_lhi_suffix = Home
ij_java_entity_li_prefix = Local
ij_java_entity_pk_class = java.lang.String
ij_java_entity_vo_suffix = VO
ij_java_enum_constants_wrap = normal
ij_java_extends_keyword_wrap = normal
ij_java_extends_list_wrap = normal
ij_java_field_annotation_wrap = split_into_lines
ij_java_finally_on_new_line = false
ij_java_for_brace_force = always
ij_java_for_statement_new_line_after_left_paren = false
ij_java_for_statement_right_paren_on_new_line = false
ij_java_for_statement_wrap = normal
ij_java_generate_final_locals = false
ij_java_generate_final_parameters = false
ij_java_if_brace_force = always
ij_java_imports_layout = $*,|,*
ij_java_indent_case_from_switch = true
ij_java_insert_inner_class_imports = false
ij_java_insert_override_annotation = true
ij_java_keep_blank_lines_before_right_brace = 2
ij_java_keep_blank_lines_between_package_declaration_and_header = 2
ij_java_keep_blank_lines_in_code = 2
ij_java_keep_blank_lines_in_declarations = 2
ij_java_keep_builder_methods_indents = false
ij_java_keep_control_statement_in_one_line = true
ij_java_keep_first_column_comment = true
ij_java_keep_indents_on_empty_lines = false
ij_java_keep_line_breaks = true
ij_java_keep_multiple_expressions_in_one_line = false
ij_java_keep_simple_blocks_in_one_line = false
ij_java_keep_simple_classes_in_one_line = false
ij_java_keep_simple_lambdas_in_one_line = false
ij_java_keep_simple_methods_in_one_line = false
ij_java_label_indent_absolute = false
ij_java_label_indent_size = 0
ij_java_lambda_brace_style = end_of_line
ij_java_layout_static_imports_separately = true
ij_java_line_comment_add_space = false
ij_java_line_comment_add_space_on_reformat = false
ij_java_line_comment_at_first_column = true
ij_java_message_dd_suffix = EJB
ij_java_message_eb_suffix = Bean
ij_java_method_annotation_wrap = split_into_lines
ij_java_method_brace_style = end_of_line
ij_java_method_call_chain_wrap = normal
ij_java_method_parameters_new_line_after_left_paren = false
ij_java_method_parameters_right_paren_on_new_line = false
ij_java_method_parameters_wrap = normal
ij_java_modifier_list_wrap = false
ij_java_multi_catch_types_wrap = normal
ij_java_names_count_to_use_import_on_demand = 999
ij_java_new_line_after_lparen_in_annotation = false
ij_java_new_line_after_lparen_in_record_header = false
ij_java_parameter_annotation_wrap = normal
ij_java_parentheses_expression_new_line_after_left_paren = false
ij_java_parentheses_expression_right_paren_on_new_line = false
ij_java_place_assignment_sign_on_next_line = false
ij_java_prefer_longer_names = true
ij_java_prefer_parameters_wrap = false
ij_java_record_components_wrap = normal
ij_java_repeat_synchronized = true
ij_java_replace_instanceof_and_cast = false
ij_java_replace_null_check = true
ij_java_replace_sum_lambda_with_method_ref = true
ij_java_resource_list_new_line_after_left_paren = false
ij_java_resource_list_right_paren_on_new_line = false
ij_java_resource_list_wrap = normal
ij_java_rparen_on_new_line_in_annotation = false
ij_java_rparen_on_new_line_in_record_header = false
ij_java_session_dd_suffix = EJB
ij_java_session_eb_suffix = Bean
ij_java_session_hi_suffix = Home
ij_java_session_lhi_prefix = Local
ij_java_session_lhi_suffix = Home
ij_java_session_li_prefix = Local
ij_java_session_si_suffix = Service
ij_java_space_after_closing_angle_bracket_in_type_argument = false
ij_java_space_after_colon = true
ij_java_space_after_comma = true
ij_java_space_after_comma_in_type_arguments = true
ij_java_space_after_for_semicolon = true
ij_java_space_after_quest = true
ij_java_space_after_type_cast = true
ij_java_space_before_annotation_array_initializer_left_brace = false
ij_java_space_before_annotation_parameter_list = false
ij_java_space_before_array_initializer_left_brace = true
ij_java_space_before_catch_keyword = true
ij_java_space_before_catch_left_brace = true
ij_java_space_before_catch_parentheses = true
ij_java_space_before_class_left_brace = true
ij_java_space_before_colon = true
ij_java_space_before_colon_in_foreach = true
ij_java_space_before_comma = false
ij_java_space_before_do_left_brace = true
ij_java_space_before_else_keyword = true
ij_java_space_before_else_left_brace = true
ij_java_space_before_finally_keyword = true
ij_java_space_before_finally_left_brace = true
ij_java_space_before_for_left_brace = true
ij_java_space_before_for_parentheses = true
ij_java_space_before_for_semicolon = false
ij_java_space_before_if_left_brace = true
ij_java_space_before_if_parentheses = true
ij_java_space_before_method_call_parentheses = false
ij_java_space_before_method_left_brace = true
ij_java_space_before_method_parentheses = false
ij_java_space_before_opening_angle_bracket_in_type_parameter = false
ij_java_space_before_quest = true
ij_java_space_before_switch_left_brace = true
ij_java_space_before_switch_parentheses = true
ij_java_space_before_synchronized_left_brace = true
ij_java_space_before_synchronized_parentheses = true
ij_java_space_before_try_left_brace = true
ij_java_space_before_try_parentheses = true
ij_java_space_before_type_parameter_list = false
ij_java_space_before_while_keyword = true
ij_java_space_before_while_left_brace = true
ij_java_space_before_while_parentheses = true
ij_java_space_inside_one_line_enum_braces = false
ij_java_space_within_empty_array_initializer_braces = false
ij_java_space_within_empty_method_call_parentheses = false
ij_java_space_within_empty_method_parentheses = false
ij_java_spaces_around_additive_operators = true
ij_java_spaces_around_annotation_eq = true
ij_java_spaces_around_assignment_operators = true
ij_java_spaces_around_bitwise_operators = true
ij_java_spaces_around_equality_operators = true
ij_java_spaces_around_lambda_arrow = true
ij_java_spaces_around_logical_operators = true
ij_java_spaces_around_method_ref_dbl_colon = false
ij_java_spaces_around_multiplicative_operators = true
ij_java_spaces_around_relational_operators = true
ij_java_spaces_around_shift_operators = true
ij_java_spaces_around_type_bounds_in_type_parameters = true
ij_java_spaces_around_unary_operator = false
ij_java_spaces_within_angle_brackets = false
ij_java_spaces_within_annotation_parentheses = false
ij_java_spaces_within_array_initializer_braces = false
ij_java_spaces_within_braces = false
ij_java_spaces_within_brackets = false
ij_java_spaces_within_cast_parentheses = false
ij_java_spaces_within_catch_parentheses = false
ij_java_spaces_within_for_parentheses = false
ij_java_spaces_within_if_parentheses = false
ij_java_spaces_within_method_call_parentheses = false
ij_java_spaces_within_method_parentheses = false
ij_java_spaces_within_parentheses = false
ij_java_spaces_within_record_header = false
ij_java_spaces_within_switch_parentheses = false
ij_java_spaces_within_synchronized_parentheses = false
ij_java_spaces_within_try_parentheses = false
ij_java_spaces_within_while_parentheses = false
ij_java_special_else_if_treatment = true
ij_java_subclass_name_suffix = Impl
ij_java_ternary_operation_signs_on_next_line = false
ij_java_ternary_operation_wrap = normal
ij_java_test_name_suffix = Test
ij_java_throws_keyword_wrap = normal
ij_java_throws_list_wrap = normal
ij_java_use_external_annotations = false
ij_java_use_fq_class_names = false
ij_java_use_relative_indents = false
ij_java_use_single_class_imports = true
ij_java_variable_annotation_wrap = normal
ij_java_visibility = public
ij_java_while_brace_force = always
ij_java_while_on_new_line = false
ij_java_wrap_comments = false
ij_java_wrap_first_method_in_call_chain = false
ij_java_wrap_long_lines = false
[*.md]
insert_final_newline = false
trim_trailing_whitespace = false
[*.yaml]
indent_size = 2
[*.yml]
indent_size = 2
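
One way to check files against these rules locally is the third-party editorconfig-checker tool; a sketch, assuming its ec binary is on the PATH:

ec    # reports violations of the rules above across the repo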

Binary file not shown (removed image, 177 KiB).

Binary file not shown (removed image, 127 KiB).

18 .github/CODEOWNERS vendored Normal file

@@ -0,0 +1,18 @@
* @Haarolean
# BACKEND
/pom.xml @provectus/kafka-backend
/kafka-ui-contract/ @provectus/kafka-backend
/kafka-ui-api/ @provectus/kafka-backend
# FRONTEND
/kafka-ui-react-app/ @provectus/kafka-frontend
# TESTS
/kafka-ui-e2e-checks/ @provectus/kafka-qa
# INFRA
/.github/workflows/ @provectus/kafka-devops
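
The CODEOWNERS syntax can be validated without pushing. A sketch, assuming the GitHub CLI is authenticated against the repo:

gh api repos/provectus/kafka-ui/codeowners/errors    # lists CODEOWNERS syntax errors, if any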

92 .github/ISSUE_TEMPLATE/bug.yml vendored Normal file

@@ -0,0 +1,92 @@
name: "\U0001F41E Bug report"
description: File a bug report
labels: ["status/triage", "type/bug"]
assignees: []
body:
- type: markdown
attributes:
value: |
Hi, thanks for raising the issue(s); all contributions really matter!
Please note that we'll close the issue without further explanation if you don't follow
this template and don't provide the requested information.
- type: checkboxes
id: terms
attributes:
label: Issue submitter TODO list
description: By checking these boxes you confirm you've done the essential things.
options:
- label: I've looked up my issue in [FAQ](https://docs.kafka-ui.provectus.io/faq/common-problems)
required: true
- label: I've searched for already existing issues [here](https://github.com/provectus/kafka-ui/issues)
required: true
- label: I've tried running the `master`-labeled docker image and the issue still persists there
required: true
- label: I'm running a supported version of the application, listed [here](https://github.com/provectus/kafka-ui/blob/master/SECURITY.md)
required: true
- type: textarea
attributes:
label: Describe the bug (actual behavior)
description: A clear and concise description of what the bug is. Use a list if there is more than one problem
validations:
required: true
- type: textarea
attributes:
label: Expected behavior
description: A clear and concise description of what you expected to happen
validations:
required: false
- type: textarea
attributes:
label: Your installation details
description: |
How do you run the app? Please provide as much info as possible:
1. App version (commit hash in the top left corner of the UI)
2. Helm chart version, if you use one
3. Your application config. Please remove sensitive info like passwords or API keys.
4. Any IaC configs
validations:
required: true
- type: textarea
attributes:
label: Steps to reproduce
description: |
Please write down the order of the actions required to reproduce the issue.
For advanced setups or complicated issues, we might need you to provide
a minimal [reproducible example](https://stackoverflow.com/help/minimal-reproducible-example).
validations:
required: true
- type: textarea
attributes:
label: Screenshots
description: |
If applicable, add screenshots to help explain your problem
validations:
required: false
- type: textarea
attributes:
label: Logs
description: |
If applicable, *upload* logs to help explain your problem
validations:
required: false
- type: textarea
attributes:
label: Additional context
description: |
Add any other context about the problem here, e.g.:
1. Are there any alternative scenarios (different data/methods/configuration/setup) you have tried?
Were they successful, or did the same issue occur? Please provide steps as well.
2. Related issues (if there are any).
3. Logs (if available).
4. Is there any serious end-user impact caused by this issue that might be overlooked?
validations:
required: false

14 .github/ISSUE_TEMPLATE/config.yml vendored Normal file

@@ -0,0 +1,14 @@
blank_issues_enabled: false
contact_links:
- name: Report helm issue
url: https://github.com/provectus/kafka-ui-charts
about: Our helm charts are located in another repo. Please raise issues/PRs regarding charts in that repo.
- name: Official documentation
url: https://docs.kafka-ui.provectus.io/
about: Before reaching out for support, please refer to our documentation. Read the "FAQ" and "Common problems" sections, and also try the search there.
- name: Community Discord
url: https://discord.gg/4DWzD7pGE5
about: Chat with other users, get some support or ask questions.
- name: GitHub Discussions
url: https://github.com/provectus/kafka-ui/discussions
about: An alternative place to ask questions or to get some support.

66 .github/ISSUE_TEMPLATE/feature.yml vendored Normal file

@@ -0,0 +1,66 @@
name: "\U0001F680 Feature request"
description: Propose a new feature
labels: ["status/triage", "type/feature"]
assignees: []
body:
- type: markdown
attributes:
value: |
Hi, thanks for raising the issue(s); all contributions really matter!
Please note that we'll close the issue without further explanation if you don't follow
this template and don't provide the requested information.
- type: checkboxes
id: terms
attributes:
label: Issue submitter TODO list
description: By checking these boxes you confirm you've done the essential things.
options:
- label: I've searched for already existing issues [here](https://github.com/provectus/kafka-ui/issues)
required: true
- label: I'm running a supported version of the application, listed [here](https://github.com/provectus/kafka-ui/blob/master/SECURITY.md), and the feature is not present there
required: true
- type: textarea
attributes:
label: Is your proposal related to a problem?
description: |
Provide a clear and concise description of what the problem is.
For example, "I'm always frustrated when..."
validations:
required: false
- type: textarea
attributes:
label: Describe the feature you're interested in
description: |
Provide a clear and concise description of what you want to happen.
validations:
required: true
- type: textarea
attributes:
label: Describe alternatives you've considered
description: |
Let us know about other solutions you've tried or researched.
validations:
required: false
- type: input
attributes:
label: Version you're running
description: |
Please provide the app version you're currently running (the commit hash shown in the top left corner of the UI).
validations:
required: true
- type: textarea
attributes:
label: Additional context
description: |
Is there anything else you can add about the proposal?
You might want to link to related issues here, if you haven't already.
validations:
required: false

31 .github/PULL_REQUEST_TEMPLATE.md vendored Normal file

@@ -0,0 +1,31 @@
<!-- ignore-task-list-start -->
- [ ] **Breaking change?** (if so, please describe the impact and migration path for existing application instances)
<!-- ignore-task-list-end -->

**What changes did you make?** (Give an overview)

**Is there anything you'd like reviewers to focus on?**

**How Has This Been Tested?** (put an "x" (case-sensitive!) next to an item)
<!-- ignore-task-list-start -->
- [ ] No need to
- [ ] Manually (please describe, if necessary)
- [ ] Unit checks
- [ ] Integration checks
- [ ] Covered by existing automation
<!-- ignore-task-list-end -->

**Checklist** (put an "x" (case-sensitive!) next to all the items, otherwise the build will fail)
- [ ] I have performed a self-review of my own code
- [ ] I have commented my code, particularly in hard-to-understand areas
- [ ] I have made corresponding changes to the documentation (e.g. **ENVIRONMENT VARIABLES**)
- [ ] My changes generate no new warnings (e.g. Sonar is happy)
- [ ] I have added tests that prove my fix is effective or that my feature works
- [ ] New and existing unit tests pass locally with my changes
- [ ] Any dependent changes have been merged

Check out [Contributing](https://github.com/provectus/kafka-ui/blob/master/CONTRIBUTING.md) and [Code of Conduct](https://github.com/provectus/kafka-ui/blob/master/CODE-OF-CONDUCT.md)

**A picture of a cute animal (not mandatory but encouraged)**

102 .github/dependabot.yml vendored Normal file

@@ -0,0 +1,102 @@
version: 2
updates:
- package-ecosystem: maven
directory: "/"
schedule:
interval: daily
time: "10:00"
timezone: Europe/Moscow
reviewers:
- "Haarolean"
labels:
- "scope/backend"
- "type/dependencies"
- package-ecosystem: npm
directory: "/kafka-ui-react-app"
schedule:
interval: weekly
time: "10:00"
timezone: Europe/Moscow
open-pull-requests-limit: 10
versioning-strategy: increase-if-necessary
labels:
- "scope/frontend"
- "type/dependencies"
ignore:
- dependency-name: react-hook-form
versions:
- 6.15.5
- 7.0.0
- 7.0.6
- dependency-name: "@hookform/error-message"
versions:
- 1.1.0
- dependency-name: use-debounce
versions:
- 6.0.0
- 6.0.1
- dependency-name: "@rooks/use-outside-click-ref"
versions:
- 4.10.1
- dependency-name: react-multi-select-component
versions:
- 3.1.6
- 4.0.0
- dependency-name: husky
versions:
- 5.1.3
- 5.2.0
- 6.0.0
- dependency-name: "@types/node-fetch"
versions:
- 2.5.9
- dependency-name: "@testing-library/jest-dom"
versions:
- 5.11.10
- dependency-name: "@typescript-eslint/eslint-plugin"
versions:
- 4.20.0
- dependency-name: "@openapitools/openapi-generator-cli"
versions:
- 2.2.5
- dependency-name: "@typescript-eslint/parser"
versions:
- 4.20.0
- dependency-name: react-datepicker
versions:
- 3.7.0
- dependency-name: eslint
versions:
- 7.23.0
- dependency-name: "@testing-library/user-event"
versions:
- 13.0.6
- dependency-name: immer
versions:
- 9.0.1
- dependency-name: react-scripts
versions:
- 4.0.3
- dependency-name: eslint-config-prettier
versions:
- 8.1.0
- dependency-name: "@testing-library/react"
versions:
- 11.2.5
- dependency-name: lodash
versions:
- 4.17.21
- dependency-name: react-json-tree
versions:
- 0.15.0
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: weekly
time: "10:00"
timezone: Europe/Moscow
reviewers:
- "Haarolean"
labels:
- "scope/infrastructure"
- "type/dependencies"

55 .github/release_drafter.yaml vendored Normal file

@@ -0,0 +1,55 @@
name-template: '$RESOLVED_VERSION'
tag-template: 'v$RESOLVED_VERSION'
template: |
## Changes
$CHANGES
## Contributors
$CONTRIBUTORS
exclude-labels:
- 'scope/infrastructure'
- 'scope/QA'
- 'scope/AQA'
- 'type/dependencies'
- 'type/chore'
- 'type/documentation'
- 'type/refactoring'
categories:
- title: '🚩 Breaking Changes'
labels:
- 'impact/changelog'
- title: '⚙ Features'
labels:
- 'type/feature'
- title: '🪛 Enhancements'
labels:
- 'type/enhancement'
- title: '🔨 Bug Fixes'
labels:
- 'type/bug'
- title: 'Security'
labels:
- 'type/security'
- title: '⎈ Helm/K8S Changes'
labels:
- 'scope/k8s'
change-template: '- $TITLE @$AUTHOR (#$NUMBER)'
version-resolver:
major:
labels:
- 'major'
minor:
labels:
- 'minor'
patch:
labels:
- 'patch'
default: patch

77 .github/workflows/aws_publisher.yaml vendored Normal file

@@ -0,0 +1,77 @@
name: "Infra: Release: AWS Marketplace Publisher"
on:
workflow_dispatch:
inputs:
KafkaUIInfraBranch:
description: 'Branch name of the kafka-ui-infra repo; build commands will be executed from this branch'
required: true
default: 'master'
KafkaUIReleaseVersion:
description: 'Version of KafkaUI'
required: true
default: '0.3.2'
PublishOnMarketplace:
description: 'If set to true, a request to update the AWS Server product version will be raised'
required: true
default: false
type: boolean
jobs:
build-ami:
name: Build AMI
runs-on: ubuntu-latest
steps:
- name: Clone infra repo
run: |
echo "Cloning repo..."
git clone https://infra-tech:${{ secrets.INFRA_USER_ACCESS_TOKEN }}@github.com/provectus/kafka-ui-infra.git --branch ${{ github.event.inputs.KafkaUIInfraBranch }}
echo "Cd to packer DIR..."
cd kafka-ui-infra/ami
echo "WORK_DIR=$(pwd)" >> $GITHUB_ENV
echo "Packer will be triggered in this dir $WORK_DIR"
- name: Configure AWS credentials for Kafka-UI account
uses: aws-actions/configure-aws-credentials@v3
with:
aws-access-key-id: ${{ secrets.AWS_AMI_PUBLISH_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_AMI_PUBLISH_KEY_SECRET }}
aws-region: us-east-1
# validate templates
- name: Validate Template
uses: hashicorp/packer-github-actions@master
with:
command: validate
arguments: -syntax-only
target: kafka-ui-infra/ami/kafka-ui.pkr.hcl
# build artifact
- name: Build Artifact
uses: hashicorp/packer-github-actions@master
with:
command: build
arguments: "-color=false -on-error=abort -var=kafka_ui_release_version=${{ github.event.inputs.KafkaUIReleaseVersion }}"
target: kafka-ui.pkr.hcl
working_directory: ${{ env.WORK_DIR }}
env:
PACKER_LOG: 1
# add fresh AMI to AWS Marketplace
- name: Publish Artifact at Marketplace
if: ${{ github.event.inputs.PublishOnMarketplace == 'true' }}
env:
PRODUCT_ID: ${{ secrets.AWS_SERVER_PRODUCT_ID }}
RELEASE_VERSION: "${{ github.event.inputs.KafkaUIReleaseVersion }}"
RELEASE_NOTES: "https://github.com/provectus/kafka-ui/releases/tag/v${{ github.event.inputs.KafkaUIReleaseVersion }}"
MP_ROLE_ARN: ${{ secrets.AWS_MARKETPLACE_AMI_ACCESS_ROLE }} # https://docs.aws.amazon.com/marketplace/latest/userguide/ami-single-ami-products.html#single-ami-marketplace-ami-access
AMI_OS_VERSION: "amzn2-ami-kernel-5.10-hvm-*-x86_64-gp2"
run: |
set -x
pwd
ls -la kafka-ui-infra/ami
echo $WORK_DIR/manifest.json
export AMI_ID=$(jq -r '.builds[-1].artifact_id' kafka-ui-infra/ami/manifest.json | cut -d ":" -f2)
/bin/bash kafka-ui-infra/aws-marketplace/prepare_changeset.sh > changeset.json
aws marketplace-catalog start-change-set \
--catalog "AWSMarketplace" \
--change-set "$(cat changeset.json)"
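
For reference, the jq filter above extracts the region-qualified AMI ID from Packer's manifest. A sketch with a hypothetical manifest entry (illustrative values only):

# manifest.json produced by the Packer build might contain:
# { "builds": [ { "artifact_id": "us-east-1:ami-0123456789abcdef0" } ] }
jq -r '.builds[-1].artifact_id' manifest.json | cut -d ":" -f2
# -> ami-0123456789abcdef0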

56 .github/workflows/backend.yml vendored Normal file

@@ -0,0 +1,56 @@
name: "Backend: PR/master build & test"
on:
push:
branches:
- master
pull_request_target:
types: ["opened", "edited", "reopened", "synchronize"]
paths:
- "kafka-ui-api/**"
- "pom.xml"
permissions:
checks: write
pull-requests: write
jobs:
build-and-test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
ref: ${{ github.event.pull_request.head.sha }}
- name: Set up JDK
uses: actions/setup-java@v3
with:
java-version: '17'
distribution: 'zulu'
cache: 'maven'
- name: Cache SonarCloud packages
uses: actions/cache@v3
with:
path: ~/.sonar/cache
key: ${{ runner.os }}-sonar
restore-keys: ${{ runner.os }}-sonar
- name: Build and analyze pull request target
if: ${{ github.event_name == 'pull_request_target' }}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
SONAR_TOKEN: ${{ secrets.SONAR_TOKEN_BACKEND }}
HEAD_REF: ${{ github.head_ref }}
BASE_REF: ${{ github.base_ref }}
run: |
./mvnw -B -ntp versions:set -DnewVersion=${{ github.event.pull_request.head.sha }}
./mvnw -B -V -ntp verify org.sonarsource.scanner.maven:sonar-maven-plugin:sonar \
-Dsonar.projectKey=com.provectus:kafka-ui_backend \
-Dsonar.pullrequest.key=${{ github.event.pull_request.number }} \
-Dsonar.pullrequest.branch=$HEAD_REF \
-Dsonar.pullrequest.base=$BASE_REF
- name: Build and analyze push master
if: ${{ github.event_name == 'push' }}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
SONAR_TOKEN: ${{ secrets.SONAR_TOKEN_BACKEND }}
run: |
./mvnw -B -ntp versions:set -DnewVersion=$GITHUB_SHA
./mvnw -B -V -ntp verify org.sonarsource.scanner.maven:sonar-maven-plugin:sonar \
-Dsonar.projectKey=com.provectus:kafka-ui_backend

13 .github/workflows/block_merge.yml vendored Normal file

@@ -0,0 +1,13 @@
name: "Infra: PR block merge"
on:
pull_request:
types: [opened, labeled, unlabeled, synchronize]
jobs:
block_merge:
runs-on: ubuntu-latest
steps:
- uses: mheap/github-action-required-labels@v5
with:
mode: exactly
count: 0
labels: "status/blocked, status/needs-attention, status/on-hold, status/pending, status/triage, status/pending-backend, status/pending-frontend, status/pending-QA"

105 .github/workflows/branch-deploy.yml vendored Normal file

@@ -0,0 +1,105 @@
name: "Infra: Feature Testing: Init env"
on:
workflow_dispatch:
pull_request:
types: ['labeled']
jobs:
build:
if: ${{ github.event.label.name == 'status/feature_testing' || github.event.label.name == 'status/feature_testing_public' }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: get branch name
id: extract_branch
run: |
tag='pr${{ github.event.pull_request.number }}'
echo "tag=${tag}" >> $GITHUB_OUTPUT
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Set up JDK
uses: actions/setup-java@v3
with:
java-version: '17'
distribution: 'zulu'
cache: 'maven'
- name: Build
id: build
run: |
./mvnw -B -ntp versions:set -DnewVersion=$GITHUB_SHA
./mvnw -B -V -ntp clean package -Pprod -DskipTests
export VERSION=$(./mvnw -q -Dexec.executable=echo -Dexec.args='${project.version}' --non-recursive exec:exec)
echo "version=${VERSION}" >> $GITHUB_OUTPUT
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v2
- name: Cache Docker layers
uses: actions/cache@v3
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}
restore-keys: |
${{ runner.os }}-buildx-
- name: Configure AWS credentials for Kafka-UI account
uses: aws-actions/configure-aws-credentials@v3
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: eu-central-1
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@v1
- name: Build and push
id: docker_build_and_push
uses: docker/build-push-action@v4
with:
builder: ${{ steps.buildx.outputs.name }}
context: kafka-ui-api
push: true
tags: 297478128798.dkr.ecr.eu-central-1.amazonaws.com/kafka-ui:${{ steps.extract_branch.outputs.tag }}
build-args: |
JAR_FILE=kafka-ui-api-${{ steps.build.outputs.version }}.jar
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache
outputs:
tag: ${{ steps.extract_branch.outputs.tag }}
make-branch-env:
needs: build
runs-on: ubuntu-latest
steps:
- name: clone
run: |
git clone https://infra-tech:${{ secrets.INFRA_USER_ACCESS_TOKEN }}@github.com/provectus/kafka-ui-infra.git --branch envs
- name: create deployment
run: |
cd kafka-ui-infra/aws-infrastructure4eks/argocd/scripts
echo "Branch:${{ needs.build.outputs.tag }}"
./kafka-ui-deployment-from-branch.sh ${{ needs.build.outputs.tag }} ${{ github.event.label.name }} ${{ secrets.FEATURE_TESTING_UI_PASSWORD }}
git config --global user.email "infra-tech@provectus.com"
git config --global user.name "infra-tech"
git add ../kafka-ui-from-branch/
git commit -m "added env:${{ needs.build.outputs.deploy }}" && git push || true
- name: update status check for private deployment
if: ${{ github.event.label.name == 'status/feature_testing' }}
uses: Sibz/github-status-action@v1.1.6
with:
authToken: ${{secrets.GITHUB_TOKEN}}
context: "Click Details button to open custom deployment page"
state: "success"
sha: ${{ github.event.pull_request.head.sha || github.sha }}
target_url: "http://${{ needs.build.outputs.tag }}.internal.kafka-ui.provectus.io"
- name: update status check for public deployment
if: ${{ github.event.label.name == 'status/feature_testing_public' }}
uses: Sibz/github-status-action@v1.1.6
with:
authToken: ${{secrets.GITHUB_TOKEN}}
context: "Click Details button to open custom deployment page"
state: "success"
sha: ${{ github.event.pull_request.head.sha || github.sha }}
target_url: "http://${{ needs.build.outputs.tag }}.internal.kafka-ui.provectus.io"

22 .github/workflows/branch-remove.yml vendored Normal file

@@ -0,0 +1,22 @@
name: "Infra: Feature Testing: Destroy env"
on:
workflow_dispatch:
pull_request:
types: ['unlabeled', 'closed']
jobs:
remove:
runs-on: ubuntu-latest
if: ${{ (github.event.label.name == 'status/feature_testing' || github.event.label.name == 'status/feature_testing_public') || (github.event.action == 'closed' && (contains(github.event.pull_request.labels.*.name, 'status/feature_testing') || contains(github.event.pull_request.labels.*.name, 'status/feature_testing_public'))) }}
steps:
- uses: actions/checkout@v3
- name: clone
run: |
git clone https://infra-tech:${{ secrets.INFRA_USER_ACCESS_TOKEN }}@github.com/provectus/kafka-ui-infra.git --branch envs
- name: remove env
run: |
cd kafka-ui-infra/aws-infrastructure4eks/argocd/scripts
./delete-env.sh pr${{ github.event.pull_request.number }} || true
git config --global user.email "infra-tech@provectus.com"
git config --global user.name "infra-tech"
git add ../kafka-ui-from-branch/
git commit -m "removed env:${{ needs.build.outputs.deploy }}" && git push || true

@@ -0,0 +1,74 @@
name: "Infra: Image Testing: Deploy"
on:
workflow_dispatch:
pull_request:
types: ['labeled']
jobs:
build:
if: ${{ github.event.label.name == 'status/image_testing' }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: get branch name
id: extract_branch
run: |
tag='${{ github.event.pull_request.number }}'
echo "tag=${tag}" >> $GITHUB_OUTPUT
- name: Set up JDK
uses: actions/setup-java@v3
with:
java-version: '17'
distribution: 'zulu'
cache: 'maven'
- name: Build
id: build
run: |
./mvnw -B -ntp versions:set -DnewVersion=$GITHUB_SHA
./mvnw -B -V -ntp clean package -Pprod -DskipTests
export VERSION=$(./mvnw -q -Dexec.executable=echo -Dexec.args='${project.version}' --non-recursive exec:exec)
echo "version=${VERSION}" >> $GITHUB_OUTPUT
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v2
- name: Cache Docker layers
uses: actions/cache@v3
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}
restore-keys: |
${{ runner.os }}-buildx-
- name: Configure AWS credentials for Kafka-UI account
uses: aws-actions/configure-aws-credentials@v3
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: us-east-1
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@v1
with:
registry-type: 'public'
- name: Build and push
id: docker_build_and_push
uses: docker/build-push-action@v4
with:
builder: ${{ steps.buildx.outputs.name }}
context: kafka-ui-api
push: true
tags: public.ecr.aws/provectus/kafka-ui-custom-build:${{ steps.extract_branch.outputs.tag }}
build-args: |
JAR_FILE=kafka-ui-api-${{ steps.build.outputs.version }}.jar
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache
- name: make comment with private deployment link
uses: peter-evans/create-or-update-comment@v3
with:
issue-number: ${{ github.event.pull_request.number }}
body: |
Image published at public.ecr.aws/provectus/kafka-ui-custom-build:${{ steps.extract_branch.outputs.tag }}
outputs:
tag: ${{ steps.extract_branch.outputs.tag }}

79 .github/workflows/codeql-analysis.yml vendored Normal file

@@ -0,0 +1,79 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"
on:
push:
branches: [ master ]
pull_request:
# The branches below must be a subset of the branches above
branches: [ master ]
paths:
- 'kafka-ui-contract/**'
- 'kafka-ui-react-app/**'
- 'kafka-ui-api/**'
- 'kafka-ui-serde-api/**'
schedule:
- cron: '39 15 * * 6'
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
language: [ 'javascript', 'java' ]
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
# Learn more:
# https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
steps:
- name: Checkout repository
uses: actions/checkout@v3
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v2
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# queries: ./path/to/local/query, your-org/your-repo/queries@main
- name: Set up JDK
uses: actions/setup-java@v3
with:
java-version: '17'
distribution: 'zulu'
cache: 'maven'
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v2
# Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl
# ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
# and modify them (or add more) to build your code if your project
# uses a compiled language
#- run: |
# make bootstrap
# make release
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2

62 .github/workflows/cve.yaml vendored Normal file

@@ -0,0 +1,62 @@
name: CVE checks docker master
on:
workflow_dispatch:
schedule:
# * is a special character in YAML so you have to quote this string
- cron: '0 8 15 * *'
jobs:
build-and-test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Set up JDK
uses: actions/setup-java@v3
with:
java-version: '17'
distribution: 'zulu'
cache: 'maven'
- name: Build project
id: build
run: |
./mvnw -B -ntp versions:set -DnewVersion=$GITHUB_SHA
./mvnw -B -V -ntp clean package -DskipTests
export VERSION=$(./mvnw -q -Dexec.executable=echo -Dexec.args='${project.version}' --non-recursive exec:exec)
echo "version=${VERSION}" >> $GITHUB_OUTPUT
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Cache Docker layers
uses: actions/cache@v3
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}
restore-keys: |
${{ runner.os }}-buildx-
- name: Build docker image
uses: docker/build-push-action@v4
with:
builder: ${{ steps.buildx.outputs.name }}
context: kafka-ui-api
platforms: linux/amd64
push: false
load: true
tags: |
provectuslabs/kafka-ui:${{ steps.build.outputs.version }}
build-args: |
JAR_FILE=kafka-ui-api-${{ steps.build.outputs.version }}.jar
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache
- name: Run CVE checks
uses: aquasecurity/trivy-action@0.12.0
with:
image-ref: "provectuslabs/kafka-ui:${{ steps.build.outputs.version }}"
format: "table"
exit-code: "1"

@@ -0,0 +1,34 @@
name: "Infra: Image Testing: Delete"
on:
workflow_dispatch:
pull_request:
types: ['unlabeled', 'closed']
jobs:
remove:
if: ${{ github.event.label.name == 'status/image_testing' || ( github.event.action == 'closed' && (contains(github.event.pull_request.labels.*.name, 'status/image_testing'))) }}
runs-on: ubuntu-latest
steps:
- name: get branch name
id: extract_branch
run: |
tag='${{ github.event.pull_request.number }}'
echo "tag=${tag}" >> $GITHUB_OUTPUT
- name: Configure AWS credentials for Kafka-UI account
uses: aws-actions/configure-aws-credentials@v3
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: us-east-1
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@v1
with:
registry-type: 'public'
- name: Remove from ECR
id: remove_from_ecr
run: |
aws ecr-public batch-delete-image \
--repository-name kafka-ui-custom-build \
--image-ids imageTag=${{ steps.extract_branch.outputs.tag }} \
--region us-east-1

23 .github/workflows/documentation.yaml vendored Normal file

@@ -0,0 +1,23 @@
name: "Infra: Docs: URL linter"
on:
pull_request:
types:
- opened
- labeled
- reopened
- synchronize
paths:
- 'documentation/**'
- '**.md'
jobs:
build-and-test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Check URLs in files
uses: urlstechie/urlchecker-action@0.0.34
with:
exclude_patterns: localhost,127.0.,192.168.
exclude_urls: https://api.server,https://graph.microsoft.com/User.Read,https://dev-a63ggcut.auth0.com/,http://main-schema-registry:8081,http://schema-registry:8081,http://another-yet-schema-registry:8081,http://another-schema-registry:8081
print_all: false
file_types: .md

88 .github/workflows/e2e-automation.yml vendored Normal file

@@ -0,0 +1,88 @@
name: "E2E: Automation suite"
on:
workflow_dispatch:
inputs:
test_suite:
description: 'Select test suite to run'
default: 'regression'
required: true
type: choice
options:
- regression
- sanity
- smoke
qase_token:
description: 'Set Qase token to enable integration'
required: false
type: string
jobs:
build-and-test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
ref: ${{ github.sha }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v3
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: eu-central-1
- name: Set up environment
id: set_env_values
run: |
cat "./kafka-ui-e2e-checks/.env.ci" >> "./kafka-ui-e2e-checks/.env"
- name: Pull with Docker
id: pull_chrome
run: |
docker pull selenoid/vnc_chrome:103.0
- name: Set up JDK
uses: actions/setup-java@v3
with:
java-version: '17'
distribution: 'zulu'
cache: 'maven'
- name: Build with Maven
id: build_app
run: |
./mvnw -B -ntp versions:set -DnewVersion=${{ github.sha }}
./mvnw -B -V -ntp clean install -Pprod -Dmaven.test.skip=true ${{ github.event.inputs.extraMavenOptions }}
- name: Compose with Docker
id: compose_app
# use the following command until #819 is fixed
run: |
docker-compose -f kafka-ui-e2e-checks/docker/selenoid-git.yaml up -d
docker-compose -f ./documentation/compose/e2e-tests.yaml up -d
- name: Run test suite
run: |
./mvnw -B -ntp versions:set -DnewVersion=${{ github.sha }}
./mvnw -B -V -ntp -DQASEIO_API_TOKEN=${{ github.event.inputs.qase_token }} -Dsurefire.suiteXmlFiles='src/test/resources/${{ github.event.inputs.test_suite }}.xml' -Dsuite=${{ github.event.inputs.test_suite }} -f 'kafka-ui-e2e-checks' test -Pprod
- name: Generate Allure report
uses: simple-elf/allure-report-action@master
if: always()
id: allure-report
with:
allure_results: ./kafka-ui-e2e-checks/allure-results
gh_pages: allure-results
allure_report: allure-report
subfolder: allure-results
report_url: "http://kafkaui-allure-reports.s3-website.eu-central-1.amazonaws.com"
- name: Deploy report to Amazon S3
uses: jakejarvis/s3-sync-action@master
if: always()
env:
AWS_S3_BUCKET: 'kafkaui-allure-reports'
AWS_REGION: 'eu-central-1'
SOURCE_DIR: 'allure-history/allure-results'
- name: Post link to Allure report
if: always()
uses: Sibz/github-status-action@v1.1.6
with:
authToken: ${{secrets.GITHUB_TOKEN}}
context: "Click Details button to open Allure report"
state: "success"
sha: ${{ github.sha }}
target_url: http://kafkaui-allure-reports.s3-website.eu-central-1.amazonaws.com/${{ github.run_number }}
- name: Dump Docker logs on failure
if: failure()
uses: jwalton/gh-docker-logs@v2.2.1
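
Because the suite is exposed via workflow_dispatch, it can also be started from a terminal. A sketch, assuming the GitHub CLI is installed and QASE_TOKEN is set in the environment:

gh workflow run "E2E: Automation suite" -f test_suite=smoke -f qase_token="$QASE_TOKEN"
gh run watch    # follow the run that was just started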

82 .github/workflows/e2e-checks.yaml vendored Normal file

@@ -0,0 +1,82 @@
name: "E2E: PR healthcheck"
on:
pull_request_target:
types: [ "opened", "edited", "reopened", "synchronize" ]
paths:
- "kafka-ui-api/**"
- "kafka-ui-contract/**"
- "kafka-ui-react-app/**"
- "kafka-ui-e2e-checks/**"
- "pom.xml"
permissions:
statuses: write
jobs:
build-and-test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v3
with:
aws-access-key-id: ${{ secrets.S3_AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.S3_AWS_SECRET_ACCESS_KEY }}
aws-region: eu-central-1
- name: Set up environment
id: set_env_values
run: |
cat "./kafka-ui-e2e-checks/.env.ci" >> "./kafka-ui-e2e-checks/.env"
- name: Pull with Docker
id: pull_chrome
run: |
docker pull selenoid/vnc_chrome:103.0
- name: Set up JDK
uses: actions/setup-java@v3
with:
java-version: '17'
distribution: 'zulu'
cache: 'maven'
- name: Build with Maven
id: build_app
run: |
./mvnw -B -ntp versions:set -DnewVersion=${{ github.event.pull_request.head.sha }}
./mvnw -B -V -ntp clean install -Pprod -Dmaven.test.skip=true ${{ github.event.inputs.extraMavenOptions }}
- name: Compose with Docker
id: compose_app
# use the following command until #819 is fixed
run: |
docker-compose -f kafka-ui-e2e-checks/docker/selenoid-git.yaml up -d
docker-compose -f ./documentation/compose/e2e-tests.yaml up -d && until [ "$(docker exec kafka-ui wget --spider --server-response http://localhost:8080/actuator/health 2>&1 | grep -c 'HTTP/1.1 200 OK')" == "1" ]; do echo "Waiting for kafka-ui ..." && sleep 1; done
- name: Run test suite
run: |
./mvnw -B -ntp versions:set -DnewVersion=${{ github.event.pull_request.head.sha }}
./mvnw -B -V -ntp -Dsurefire.suiteXmlFiles='src/test/resources/smoke.xml' -f 'kafka-ui-e2e-checks' test -Pprod
- name: Generate allure report
uses: simple-elf/allure-report-action@master
if: always()
id: allure-report
with:
allure_results: ./kafka-ui-e2e-checks/allure-results
gh_pages: allure-results
allure_report: allure-report
subfolder: allure-results
report_url: "http://kafkaui-allure-reports.s3-website.eu-central-1.amazonaws.com"
- name: Deploy report to Amazon S3
uses: jakejarvis/s3-sync-action@master
if: always()
env:
AWS_S3_BUCKET: 'kafkaui-allure-reports'
AWS_REGION: 'eu-central-1'
SOURCE_DIR: 'allure-history/allure-results'
- name: Post link to Allure report
if: always()
uses: Sibz/github-status-action@v1.1.6
with:
authToken: ${{secrets.GITHUB_TOKEN}}
context: "Click Details button to open Allure report"
state: "success"
sha: ${{ github.event.pull_request.head.sha || github.sha }}
target_url: http://kafkaui-allure-reports.s3-website.eu-central-1.amazonaws.com/${{ github.run_number }}
- name: Dump docker logs on failure
if: failure()
uses: jwalton/gh-docker-logs@v2.2.1

43 .github/workflows/e2e-manual.yml vendored Normal file

@@ -0,0 +1,43 @@
name: "E2E: Manual suite"
on:
workflow_dispatch:
inputs:
test_suite:
description: 'Select test suite to run'
default: 'manual'
required: true
type: choice
options:
- manual
- qase
qase_token:
description: 'Set Qase token to enable integration'
required: true
type: string
jobs:
build-and-test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
ref: ${{ github.sha }}
- name: Set up environment
id: set_env_values
run: |
cat "./kafka-ui-e2e-checks/.env.ci" >> "./kafka-ui-e2e-checks/.env"
- name: Set up JDK
uses: actions/setup-java@v3
with:
java-version: '17'
distribution: 'zulu'
cache: 'maven'
- name: Build with Maven
id: build_app
run: |
./mvnw -B -ntp versions:set -DnewVersion=${{ github.sha }}
./mvnw -B -V -ntp clean install -Pprod -Dmaven.test.skip=true ${{ github.event.inputs.extraMavenOptions }}
- name: Run test suite
run: |
./mvnw -B -ntp versions:set -DnewVersion=${{ github.sha }}
./mvnw -B -V -ntp -DQASEIO_API_TOKEN=${{ github.event.inputs.qase_token }} -Dsurefire.suiteXmlFiles='src/test/resources/${{ github.event.inputs.test_suite }}.xml' -Dsuite=${{ github.event.inputs.test_suite }} -f 'kafka-ui-e2e-checks' test -Pprod

75 .github/workflows/e2e-weekly.yml vendored Normal file

@@ -0,0 +1,75 @@
name: "E2E: Weekly suite"
on:
schedule:
- cron: '0 1 * * 1'
jobs:
build-and-test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
ref: ${{ github.sha }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v3
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: eu-central-1
- name: Set up environment
id: set_env_values
run: |
cat "./kafka-ui-e2e-checks/.env.ci" >> "./kafka-ui-e2e-checks/.env"
- name: Pull with Docker
id: pull_chrome
run: |
docker pull selenoid/vnc_chrome:103.0
- name: Set up JDK
uses: actions/setup-java@v3
with:
java-version: '17'
distribution: 'zulu'
cache: 'maven'
- name: Build with Maven
id: build_app
run: |
./mvnw -B -ntp versions:set -DnewVersion=${{ github.sha }}
./mvnw -B -V -ntp clean install -Pprod -Dmaven.test.skip=true ${{ github.event.inputs.extraMavenOptions }}
- name: Compose with Docker
id: compose_app
# use the following command until #819 is fixed
run: |
docker-compose -f kafka-ui-e2e-checks/docker/selenoid-git.yaml up -d
docker-compose -f ./documentation/compose/e2e-tests.yaml up -d
- name: Run test suite
run: |
./mvnw -B -ntp versions:set -DnewVersion=${{ github.sha }}
./mvnw -B -V -ntp -DQASEIO_API_TOKEN=${{ secrets.QASEIO_API_TOKEN }} -Dsurefire.suiteXmlFiles='src/test/resources/sanity.xml' -Dsuite=weekly -f 'kafka-ui-e2e-checks' test -Pprod
- name: Generate Allure report
uses: simple-elf/allure-report-action@master
if: always()
id: allure-report
with:
allure_results: ./kafka-ui-e2e-checks/allure-results
gh_pages: allure-results
allure_report: allure-report
subfolder: allure-results
report_url: "http://kafkaui-allure-reports.s3-website.eu-central-1.amazonaws.com"
- name: Deploy report to Amazon S3
uses: jakejarvis/s3-sync-action@master
if: always()
env:
AWS_S3_BUCKET: 'kafkaui-allure-reports'
AWS_REGION: 'eu-central-1'
SOURCE_DIR: 'allure-history/allure-results'
- name: Post link to Allure report
if: always()
uses: Sibz/github-status-action@v1.1.6
with:
authToken: ${{secrets.GITHUB_TOKEN}}
context: "Click Details button to open Allure report"
state: "success"
sha: ${{ github.sha }}
target_url: http://kafkaui-allure-reports.s3-website.eu-central-1.amazonaws.com/${{ github.run_number }}
- name: Dump Docker logs on failure
if: failure()
uses: jwalton/gh-docker-logs@v2.2.1

58 .github/workflows/frontend.yaml vendored Normal file

@@ -0,0 +1,58 @@
name: "Frontend: PR/master build & test"
on:
push:
branches:
- master
pull_request_target:
types: ["opened", "edited", "reopened", "synchronize"]
paths:
- "kafka-ui-contract/**"
- "kafka-ui-react-app/**"
permissions:
checks: write
pull-requests: write
jobs:
build-and-test:
env:
CI: true
NODE_ENV: dev
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
# Disabling shallow clone is recommended to improve the relevancy of SonarCloud reporting
fetch-depth: 0
ref: ${{ github.event.pull_request.head.sha }}
- uses: pnpm/action-setup@v2.4.0
with:
version: 8.6.12
- name: Install node
uses: actions/setup-node@v3.8.1
with:
node-version: "18.17.1"
cache: "pnpm"
cache-dependency-path: "./kafka-ui-react-app/pnpm-lock.yaml"
- name: Install Node dependencies
run: |
cd kafka-ui-react-app/
pnpm install --frozen-lockfile
- name: Generate sources
run: |
cd kafka-ui-react-app/
pnpm gen:sources
- name: Linter
run: |
cd kafka-ui-react-app/
pnpm lint:CI
- name: Tests
run: |
cd kafka-ui-react-app/
pnpm test:CI
- name: SonarCloud Scan
uses: sonarsource/sonarcloud-github-action@master
with:
projectBaseDir: ./kafka-ui-react-app
args: -Dsonar.pullrequest.key=${{ github.event.pull_request.number }} -Dsonar.pullrequest.branch=${{ github.head_ref }} -Dsonar.pullrequest.base=${{ github.base_ref }}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
SONAR_TOKEN: ${{ secrets.SONAR_TOKEN_FRONTEND }}

84 .github/workflows/master.yaml vendored Normal file

@@ -0,0 +1,84 @@
name: "Master: Build & deploy"
on:
workflow_dispatch:
push:
branches: [ "master" ]
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Set up JDK
uses: actions/setup-java@v3
with:
java-version: '17'
distribution: 'zulu'
cache: 'maven'
- name: Build
id: build
run: |
./mvnw -B -ntp versions:set -DnewVersion=$GITHUB_SHA
./mvnw -V -B -ntp clean package -Pprod -DskipTests
export VERSION=$(./mvnw -q -Dexec.executable=echo -Dexec.args='${project.version}' --non-recursive exec:exec)
echo "version=${VERSION}" >> $GITHUB_OUTPUT
#################
# #
# Docker images #
# #
#################
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Cache Docker layers
uses: actions/cache@v3
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}
restore-keys: |
${{ runner.os }}-buildx-
- name: Login to DockerHub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push
id: docker_build_and_push
uses: docker/build-push-action@v4
with:
builder: ${{ steps.buildx.outputs.name }}
context: kafka-ui-api
platforms: linux/amd64,linux/arm64
provenance: false
push: true
tags: |
provectuslabs/kafka-ui:${{ steps.build.outputs.version }}
provectuslabs/kafka-ui:master
build-args: |
JAR_FILE=kafka-ui-api-${{ steps.build.outputs.version }}.jar
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache
#################################
# #
# Master image digest update #
# #
#################################
- name: update-master-deployment
run: |
git clone https://infra-tech:${{ secrets.INFRA_USER_ACCESS_TOKEN }}@github.com/provectus/kafka-ui-infra.git --branch master
cd kafka-ui-infra/aws-infrastructure4eks/argocd/scripts
echo "Image digest is:${{ steps.docker_build_and_push.outputs.digest }}"
./kafka-ui-update-master-digest.sh ${{ steps.docker_build_and_push.outputs.digest }}
git config --global user.email "infra-tech@provectus.com"
git config --global user.name "infra-tech"
git add ../kafka-ui/*
git commit -m "updated master image digest: ${{ steps.docker_build_and_push.outputs.digest }}" && git push

16 .github/workflows/pr-checks.yaml vendored Normal file

@@ -0,0 +1,16 @@
name: "PR: Checklist linter"
on:
pull_request_target:
types: [opened, edited, synchronize, reopened]
permissions:
checks: write
jobs:
task-check:
runs-on: ubuntu-latest
steps:
- uses: kentaro-m/task-completed-checker-action@v0.1.2
with:
repo-token: "${{ secrets.GITHUB_TOKEN }}"
- uses: dekinderfiets/pr-description-enforcer@0.0.1
with:
repo-token: "${{ secrets.GITHUB_TOKEN }}"

@@ -0,0 +1,30 @@
name: "Infra: Release: Serde API"
on: workflow_dispatch
jobs:
release-serde-api:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
- run: |
git config user.name github-actions
git config user.email github-actions@github.com
- name: Set up JDK
uses: actions/setup-java@v3
with:
java-version: "17"
distribution: "zulu"
cache: "maven"
- id: install-secret-key
name: Install GPG secret key
run: |
cat <(echo -e "${{ secrets.GPG_PRIVATE_KEY }}") | gpg --batch --import
- name: Publish to Maven Central
run: |
mvn source:jar javadoc:jar package gpg:sign -Dgpg.passphrase=${{ secrets.GPG_PASSPHRASE }} -Dserver.username=${{ secrets.NEXUS_USERNAME }} -Dserver.password=${{ secrets.NEXUS_PASSWORD }} nexus-staging:deploy -pl kafka-ui-serde-api -s settings.xml

100 .github/workflows/release.yaml vendored Normal file

@@ -0,0 +1,100 @@
name: "Infra: Release"
on:
release:
types: [published]
jobs:
release:
runs-on: ubuntu-latest
outputs:
version: ${{steps.build.outputs.version}}
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
ref: ${{ github.event.pull_request.head.sha }}
- run: |
git config user.name github-actions
git config user.email github-actions@github.com
- name: Set up JDK
uses: actions/setup-java@v3
with:
java-version: '17'
distribution: 'zulu'
cache: 'maven'
- name: Build with Maven
id: build
run: |
./mvnw -B -ntp versions:set -DnewVersion=${{ github.event.release.tag_name }}
./mvnw -B -V -ntp clean package -Pprod -DskipTests
export VERSION=$(./mvnw -q -Dexec.executable=echo -Dexec.args='${project.version}' --non-recursive exec:exec)
echo "version=${VERSION}" >> $GITHUB_OUTPUT
- name: Upload files to a GitHub release
uses: svenstaro/upload-release-action@2.7.0
with:
repo_token: ${{ secrets.GITHUB_TOKEN }}
file: kafka-ui-api/target/kafka-ui-api-${{ steps.build.outputs.version }}.jar
tag: ${{ github.event.release.tag_name }}
- name: Archive JAR
uses: actions/upload-artifact@v3
with:
name: kafka-ui-${{ steps.build.outputs.version }}
path: kafka-ui-api/target/kafka-ui-api-${{ steps.build.outputs.version }}.jar
#################
# #
# Docker images #
# #
#################
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Cache Docker layers
uses: actions/cache@v3
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}
restore-keys: |
${{ runner.os }}-buildx-
- name: Login to DockerHub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push
id: docker_build_and_push
uses: docker/build-push-action@v4
with:
builder: ${{ steps.buildx.outputs.name }}
context: kafka-ui-api
platforms: linux/amd64,linux/arm64
provenance: false
push: true
tags: |
provectuslabs/kafka-ui:${{ steps.build.outputs.version }}
provectuslabs/kafka-ui:latest
build-args: |
JAR_FILE=kafka-ui-api-${{ steps.build.outputs.version }}.jar
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache
charts:
runs-on: ubuntu-latest
needs: release
steps:
- name: Repository Dispatch
uses: peter-evans/repository-dispatch@v2
with:
token: ${{ secrets.CHARTS_ACTIONS_TOKEN }}
repository: provectus/kafka-ui-charts
event-type: prepare-helm-release
client-payload: '{"appversion": "${{ needs.release.outputs.version }}"}'

.github/workflows/release_drafter.yml vendored Normal file

@ -0,0 +1,34 @@
name: "Infra: Release Drafter run"
on:
push:
branches:
- master
workflow_dispatch:
inputs:
version:
description: 'Release version'
required: false
branch:
description: 'Target branch'
required: false
default: 'master'
permissions:
contents: read
jobs:
update_release_draft:
runs-on: ubuntu-latest
permissions:
contents: write
pull-requests: write
steps:
- uses: release-drafter/release-drafter@v5
with:
config-name: release_drafter.yaml
disable-autolabeler: true
version: ${{ github.event.inputs.version }}
commitish: ${{ github.event.inputs.branch }}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}


@ -0,0 +1,94 @@
name: "Infra: Feature Testing Public: Init env"
on:
workflow_dispatch:
inputs:
ENV_NAME:
description: 'Will be used as subdomain in the public URL.'
required: true
default: 'demo'
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: get branch name
id: extract_branch
run: |
tag="${{ github.event.inputs.ENV_NAME }}-$(date '+%F-%H-%M-%S')"
echo "tag=${tag}" >> $GITHUB_OUTPUT
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Set up JDK
uses: actions/setup-java@v3
with:
java-version: '17'
distribution: 'zulu'
cache: 'maven'
- name: Build
id: build
run: |
./mvnw -B -ntp versions:set -DnewVersion=$GITHUB_SHA
./mvnw -B -V -ntp clean package -Pprod -DskipTests
export VERSION=$(./mvnw -q -Dexec.executable=echo -Dexec.args='${project.version}' --non-recursive exec:exec)
echo "version=${VERSION}" >> $GITHUB_OUTPUT
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v2
- name: Cache Docker layers
uses: actions/cache@v3
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}
restore-keys: |
${{ runner.os }}-buildx-
- name: Configure AWS credentials for Kafka-UI account
uses: aws-actions/configure-aws-credentials@v3
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: eu-central-1
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@v1
- name: Build and push
id: docker_build_and_push
uses: docker/build-push-action@v4
with:
builder: ${{ steps.buildx.outputs.name }}
context: kafka-ui-api
push: true
tags: 297478128798.dkr.ecr.eu-central-1.amazonaws.com/kafka-ui:${{ steps.extract_branch.outputs.tag }}
build-args: |
JAR_FILE=kafka-ui-api-${{ steps.build.outputs.version }}.jar
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache
outputs:
tag: ${{ steps.extract_branch.outputs.tag }}
separate-env-create:
runs-on: ubuntu-latest
needs: build
steps:
- name: clone
run: |
git clone https://infra-tech:${{ secrets.INFRA_USER_ACCESS_TOKEN }}@github.com/provectus/kafka-ui-infra.git --branch envs
- name: separate env create
run: |
cd kafka-ui-infra/aws-infrastructure4eks/argocd/scripts
bash separate_env_create.sh ${{ github.event.inputs.ENV_NAME }} ${{ secrets.FEATURE_TESTING_UI_PASSWORD }} ${{ needs.build.outputs.tag }}
git config --global user.email "infra-tech@provectus.com"
git config --global user.name "infra-tech"
git add -A
git commit -m "separate env added: ${{ github.event.inputs.ENV_NAME }}" && git push || true
- name: echo separate environment public link
run: |
echo "Please note, separate environment creation takes up to 5-10 minutes."
echo "Separate environment will be available at http://${{ github.event.inputs.ENV_NAME }}.kafka-ui.provectus.io"
echo "Username: admin"


@ -0,0 +1,24 @@
name: "Infra: Feature Testing Public: Destroy env"
on:
workflow_dispatch:
inputs:
ENV_NAME:
description: 'Will be used to remove previously deployed separate environment.'
required: true
default: 'demo'
jobs:
separate-env-remove:
runs-on: ubuntu-latest
steps:
- name: clone
run: |
git clone https://infra-tech:${{ secrets.INFRA_USER_ACCESS_TOKEN }}@github.com/provectus/kafka-ui-infra.git --branch envs
- name: separate environment remove
run: |
cd kafka-ui-infra/aws-infrastructure4eks/argocd/scripts
bash separate_env_remove.sh ${{ github.event.inputs.ENV_NAME }}
git config --global user.email "infra-tech@provectus.com"
git config --global user.name "infra-tech"
git add -A
git commit -m "separate env removed: ${{ github.event.inputs.ENV_NAME }}" && git push || true

.github/workflows/stale.yaml vendored Normal file

@ -0,0 +1,22 @@
name: 'Infra: Close stale issues'
on:
schedule:
- cron: '30 1 * * *'
jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v8
with:
days-before-issue-stale: 7
days-before-issue-close: 3
days-before-pr-stale: 7
days-before-pr-close: 7
stale-issue-message: 'This issue has been automatically marked as stale because no requested feedback has been provided. It will be closed if no further activity occurs. Thank you for your contributions.'
stale-pr-message: 'This PR has been automatically marked as stale because no requested changes have been applied. It will be closed if no further activity occurs. Thank you for your contributions.'
stale-issue-label: 'status/stale'
stale-pr-label: 'status/stale'
only-labels: 'status/pending'
remove-issue-stale-when-updated: true
labels-to-remove-when-unstale: 'status/pending'

.github/workflows/terraform-deploy.yml vendored Normal file

@ -0,0 +1,61 @@
name: "Infra: Terraform deploy"
on:
workflow_dispatch:
inputs:
applyTerraform:
description: 'Do you want to apply the infra-repo terraform? Possible values [plan/apply].'
required: true
default: 'plan'
KafkaUIInfraBranch:
description: 'Branch name of Kafka-UI-Infra repo, tf will be executed from this branch'
required: true
default: 'master'
jobs:
terraform:
name: Terraform
runs-on: ubuntu-latest
steps:
- name: Clone infra repo
run: |
echo "Cloning repo..."
git clone https://kafka-ui-infra:${{ secrets.KAFKA_UI_INFRA_TOKEN }}@gitlab.provectus.com/provectus-internals/kafka-ui-infra.git --branch ${{ github.event.inputs.KafkaUIInfraBranch }}
echo "Cd to deployment..."
cd kafka-ui-infra/aws-infrastructure4eks/deployment
echo "TF_DIR=$(pwd)" >> $GITHUB_ENV
echo "Terraform will be triggered in this dir $TF_DIR"
- name: Configure AWS credentials for Kafka-UI account
uses: aws-actions/configure-aws-credentials@v3
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: eu-central-1
- name: Terraform Install
uses: hashicorp/setup-terraform@v2
- name: Terraform init
id: init
run: cd $TF_DIR && terraform init --backend-config="../envs/pro/terraform-backend.tfvars"
- name: Terraform validate
id: validate
run: cd $TF_DIR && terraform validate -no-color
- name: Terraform plan
id: plan
run: |
cd $TF_DIR
export TF_VAR_github_connector_access_token=${{ secrets.SOURCE_CONNECTOR_GITHUB_TOKEN }}
export TF_VAR_repo_secret=${{ secrets.KAFKA_UI_INFRA_TOKEN }}
terraform plan --var-file="../envs/pro/eks.tfvars"
- name: Terraform apply
id: apply
if: ${{ github.event.inputs.applyTerraform == 'apply' }}
run: |
cd $TF_DIR
export TF_VAR_github_connector_access_token=${{ secrets.SOURCE_CONNECTOR_GITHUB_TOKEN }}
export TF_VAR_repo_secret=${{ secrets.KAFKA_UI_INFRA_TOKEN }}
terraform apply --var-file="../envs/pro/eks.tfvars" -auto-approve

.github/workflows/triage_issues.yml vendored Normal file

@ -0,0 +1,14 @@
name: "Infra: Triage: Apply triage label for issues"
on:
issues:
types:
- opened
jobs:
triage_issues:
runs-on: ubuntu-latest
steps:
- name: Label issue
uses: andymckay/labeler@master
with:
add-labels: "status/triage"
ignore-if-assigned: true

.github/workflows/triage_prs.yml vendored Normal file

@ -0,0 +1,14 @@
name: "Infra: Triage: Apply triage label for PRs"
on:
pull_request:
types:
- opened
jobs:
triage_prs:
runs-on: ubuntu-latest
steps:
- name: Label PR
uses: andymckay/labeler@master
with:
add-labels: "status/triage"
ignore-if-labeled: true


@ -0,0 +1,32 @@
name: Welcome first time contributors
on:
pull_request_target:
types:
- opened
issues:
types:
- opened
permissions:
issues: write
pull-requests: write
jobs:
welcome:
runs-on: ubuntu-latest
steps:
- uses: actions/first-interaction@v1
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
issue-message: |
Hello there ${{ github.actor }}! 👋
Thank you and congratulations 🎉 for opening your very first issue in this project! 💖
In case you want to claim this issue, please comment down below! We will try to get back to you as soon as we can. 👀
pr-message: |
Hello there ${{ github.actor }}! 👋
Thank you and congrats 🎉 for opening your first PR on this project! ✨ 💖
We will try to review it soon!

.github/workflows/workflow_linter.yaml vendored Normal file

@ -0,0 +1,22 @@
name: "Infra: Workflow linter"
on:
pull_request:
types:
- "opened"
- "reopened"
- "synchronize"
- "edited"
paths:
- ".github/workflows/**"
jobs:
build-and-test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
ref: ${{ github.event.pull_request.head.sha }}
- name: Install yamllint
run: sudo apt install -y yamllint
- name: Validate workflow yaml files
run: yamllint .github/workflows/. -d relaxed -f github --no-warnings

.gitignore vendored Normal file

@ -0,0 +1,44 @@
HELP.md
target/
!.mvn/wrapper/maven-wrapper.jar
!**/src/main/**
!**/src/test/**
### STS ###
.apt_generated
.classpath
.factorypath
.project
.settings
.springBeans
.sts4-cache
### IntelliJ IDEA ###
.idea
*.iws
*.iml
*.ipr
### NetBeans ###
/nbproject/private/
/nbbuild/
/dist/
/nbdist/
/.nb-gradle/
build/
### VS Code ###
.vscode/
/kafka-ui-api/app/node
### SDKMAN ###
.sdkmanrc
.DS_Store
*.code-workspace
*.tar.gz
*.tgz
/docker/*.override.yaml

.mvn/wrapper/maven-wrapper.jar vendored Normal file

Binary file not shown.

.mvn/wrapper/maven-wrapper.properties vendored Normal file

@ -0,0 +1,18 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.8.6/apache-maven-3.8.6-bin.zip
wrapperUrl=https://repo.maven.apache.org/maven2/org/apache/maven/wrapper/maven-wrapper/3.1.1/maven-wrapper-3.1.1.jar

CODE-OF-CONDUCT.md Normal file

@ -0,0 +1,132 @@
# Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, caste, color, religion, or sexual identity
and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or
advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at email kafkaui@provectus.com.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series
of actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within
the community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
[https://www.contributor-covenant.org/version/2/0/code_of_conduct.html][v2.0].
Community Impact Guidelines were inspired by
[Mozilla's code of conduct enforcement ladder][Mozilla CoC].
For answers to common questions about this code of conduct, see the FAQ at
[https://www.contributor-covenant.org/faq][FAQ]. Translations are available
at [https://www.contributor-covenant.org/translations][translations].
[homepage]: https://www.contributor-covenant.org
[v2.0]: https://www.contributor-covenant.org/version/2/0/code_of_conduct.html
[Mozilla CoC]: https://github.com/mozilla/diversity
[FAQ]: https://www.contributor-covenant.org/faq
[translations]: https://www.contributor-covenant.org/translations


@ -1,98 +1,97 @@
# Contributing
This guide is an exact copy of the same document located [in our official docs](https://docs.kafka-ui.provectus.io/development/contributing). If there are any differences between the documents, the one located in our official docs should prevail.
This guide aims to walk you through the process of working on issues and Pull Requests (PRs).
Bear in mind that you will not be able to complete some steps on your own if you do not have “write” permission. Feel free to reach out to the maintainers to help you unlock these activities.
Bear in mind that you will not be able to complete some steps on your own if you do not have a “write” permission. Feel free to reach out to the maintainers to help you unlock these activities.
## General recommendations
# General recommendations
Please note that we have a [code of conduct](../project/code-of-conduct.md). Make sure that you follow it in all of your interactions with the project.
Please note that we have a code of conduct (`CODE-OF-CONDUCT.md`). Make sure that you follow it in all of your interactions with the project.
## Issues
# Issues
### Choosing an issue
## Choosing an issue
There are two options to look for the issues to contribute to.\
The first is our ["Up for grabs"](https://github.com/provectus/kafka-ui/projects/11) board. There the issues are sorted by the required experience level (beginner, intermediate, expert).
There are two options to look for the issues to contribute to. <br/>
The first is our ["Up for grabs"](https://github.com/provectus/kafka-ui/projects/11) board. There the issues are sorted by a required experience level (beginner, intermediate, expert).
The second option is to search for ["good first issue"](https://github.com/provectus/kafka-ui/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22)-labeled issues. Some of them might not be displayed on the aforementioned board or vice versa.
The second option is to search for ["good first issue"](https://github.com/provectus/kafka-ui/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22)-labeled issues. Some of them might not be displayed on the aforementioned board, or vice versa.
You also need to consider labels. You can sort the issues by scope labels, such as `scope/backend`, `scope/frontend` or even `scope/k8s`. If any issue covers several specific areas, and you do not have the required expertise for one of them, just do your part of the work — others will do the rest.
You also need to consider labels. You can sort the issues by scope labels, such as `scope/backend`, `scope/frontend` or even `scope/k8s`. If any issue covers several specific areas, and you do not have a required expertise for one of them, just do your part of work — others will do the rest.
### Grabbing the issue
## Grabbing the issue
There is a bunch of criteria that make an issue feasible for development.\
The implementation of any features and/or their enhancements should be reasonable and must be backed by justified requirements (demanded by the community, roadmap plans, etc.). The final decision is left to the maintainers' discretion.
There is a bunch of criteria that make an issue feasible for development. <br/>
The implementation of any features and/or their enhancements should be reasonable, must be backed by justified requirements (demanded by the community, [roadmap](https://docs.kafka-ui.provectus.io/project/roadmap) plans, etc.). The final decision is left for the maintainers' discretion.
All bugs should be confirmed as such (i.e. the behavior is unintended).
Any issue should be properly triaged by the maintainers beforehand, which includes:
1. Having a proper milestone set
2. Having required labels assigned: "accepted" label, scope labels, etc.
2. Having required labels assigned: accepted label, scope labels, etc.
Formally, if these triage conditions are met, you can start to work on the issue.
With all these requirements met, feel free to pick the issue you want. Reach out to the maintainers if you have any questions.
### Working on the issue
## Working on the issue
Every issue “in progress” needs to be assigned to a corresponding person. To keep the status of the issue clear to everyone, please keep the card's status updated ("project" card to the right of the issue should match the milestones name).
Every issue “in-progress” needs to be assigned to a corresponding person.
To keep the status of the issue clear to everyone, please keep the card's status updated ("project" card to the right of the issue should match the milestones name).
### Setting up a local development environment
## Setting up a local development environment
Please refer to this guide.
Please refer to [this guide](https://docs.kafka-ui.provectus.io/development/contributing).
## Pull Requests
# Pull Requests
### Branch naming
## Branch naming
In order to keep branch names uniform and easy to understand, please use the following conventions for branch naming.
In order to keep branch names uniform and easy-to-understand, please use the following conventions for branch naming.
Generally speaking, it is a good idea to add a group/type prefix to a branch; e.g., if you are working on a specific branch, you could name it `issues/xxx`.
Generally speaking, it is a good idea to add a group/type prefix to a branch; e.g.,
if you are working on a specific branch, you could name it `issues/xxx`.
Here is a list of good examples:\
`issues/123`\
`feature/feature_name`\
`bugfix/fix_thing`\
Here is a list of good examples:<br/>
`issues/123`<br/>
`feature/feature_name`<br/>
`bugfix/fix_thing`<br/>
## Code style
### Code style
Java: There is a file called `checkstyle.xml` in project root under `etc` directory.\
Java: There is a file called `checkstyle.xml` in project root under `etc` directory.<br/>
You can import it into IntelliJ IDEA via Checkstyle plugin.
### Naming conventions
REST paths should be written in **lowercase** and consist of **plural** nouns only.\
Also, multiple words that are placed in a single path segment should be divided by a hyphen (`-`).\
## Naming conventions
REST paths should be written in **lowercase** and consist of **plural** nouns only.<br/>
Also, multiple words that are placed in a single path segment should be divided by a hyphen (`-`).<br/>
Query variable names should be formatted in `camelCase`.
Model names should consist of **plural** nouns only and should be formatted in `camelCase` as well.
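To make these conventions concrete, here is a hypothetical request that follows all of them (the endpoint and parameter names are illustrative only, not a documented part of the API):
```
GET /api/clusters/local/consumer-groups?orderBy=NAME
```
Here `consumer-groups` is a plural, hyphen-separated path segment, and `orderBy` is a `camelCase` query variable.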
### Creating a PR
## Creating a PR
When creating a PR please do the following:
1. In commit messages use these [closing keywords](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue#linking-a-pull-request-to-an-issue-using-a-keyword).
2. Link an issue(-s) via "linked issues" block.
3. Set the PR labels. Ensure that you set only the same set of labels that is present in the issue, and ignore yellow `status/` labels.
4. If the PR does not close any of the issues, the PR itself might need to have a milestone set. Reach out to the maintainers to consult.
5. Assign the PR to yourself. A PR assignee is someone whose goal is to get the PR merged.
6. Add reviewers. As a rule, reviewers' suggestions are pretty good; please use them.
7. Upon merging the PR, please use a meaningful commit message, the task name should be fine in this case.
7. Upon merging the PR, please use a meaningful commit message, task name should be fine in this case.
#### Pull Request checklist
### Pull Request checklist
1. When composing a build, ensure that any install or build dependencies have been removed before the end of the layer.
2. Update the `README.md` with the details of changes made to the interface. This includes new environment variables, exposed ports, useful file locations, and container parameters.
2. Update the `README.md` with the details of changes made to the interface. This includes new environment variables,
exposed ports, useful file locations, and container parameters.
### Reviewing a PR
## Reviewing a PR
WIP
#### Pull Request reviewer checklist
### Pull Request reviewer checklist
WIP

LICENSE Normal file

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2020 CloudHut
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

README.md

@ -1,13 +1,141 @@
---
description: About Kafka-UI
---
![UI for Apache Kafka logo](documentation/images/kafka-ui-logo.png) UI for Apache Kafka&nbsp;
------------------
#### Versatile, fast and lightweight web UI for managing Apache Kafka® clusters. Built by developers, for developers.
<br/>
# About
[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://github.com/provectus/kafka-ui/blob/master/LICENSE)
![UI for Apache Kafka Price Free](documentation/images/free-open-source.svg)
[![Release version](https://img.shields.io/github/v/release/provectus/kafka-ui)](https://github.com/provectus/kafka-ui/releases)
[![Chat with us](https://img.shields.io/discord/897805035122077716)](https://discord.gg/4DWzD7pGE5)
[![Docker pulls](https://img.shields.io/docker/pulls/provectuslabs/kafka-ui)](https://hub.docker.com/r/provectuslabs/kafka-ui)
**UI for Apache Kafka** is a versatile, fast, and lightweight web UI for managing Apache Kafka® clusters. Built by developers, for developers.
<p align="center">
<a href="https://docs.kafka-ui.provectus.io/">DOCS</a>
<a href="https://docs.kafka-ui.provectus.io/configuration/quick-start">QUICK START</a>
<a href="https://discord.gg/4DWzD7pGE5">COMMUNITY DISCORD</a>
<br/>
<a href="https://aws.amazon.com/marketplace/pp/prodview-ogtt5hfhzkq6a">AWS Marketplace</a>
<a href="https://www.producthunt.com/products/ui-for-apache-kafka/reviews/new">ProductHunt</a>
</p>
<p align="center">
<img src="https://repobeats.axiom.co/api/embed/2e8a7c2d711af9daddd34f9791143e7554c35d0f.svg" />
</p>
#### UI for Apache Kafka is a free, open-source web UI to monitor and manage Apache Kafka clusters.
The app is a free, open-source web UI to monitor and manage Apache Kafka clusters.
UI for Apache Kafka is a simple tool that makes your data flows observable, helps find and troubleshoot issues faster and deliver optimal performance. Its lightweight dashboard makes it easy to track key metrics of your Kafka clusters - Brokers, Topics, Partitions, Production, and Consumption.
UI for Apache Kafka is a simple tool that makes your data flows observable, helps find and troubleshoot issues faster and delivers optimal performance. Its lightweight dashboard makes it easy to track key metrics of your Kafka clusters - Brokers, Topics, Partitions, Production, and Consumption.
### DISCLAIMER
<em>UI for Apache Kafka is a free tool built and supported by the open-source community. Curated by Provectus, it will remain free and open-source, without any paid features or subscription plans to be added in the future.
Looking for the help of Kafka experts? Provectus can help you design, build, deploy, and manage Apache Kafka clusters and streaming applications. Discover [Professional Services for Apache Kafka](https://provectus.com/professional-services-apache-kafka/), to unlock the full potential of Kafka in your enterprise! </em>
Set up UI for Apache Kafka with just a couple of easy commands to visualize your Kafka data in a comprehensible way. You can run the tool locally or in
the cloud.
![Interface](documentation/images/Interface.gif)
# Features
* **Multi-Cluster Management** — monitor and manage all your clusters in one place
* **Performance Monitoring with Metrics Dashboard** — track key Kafka metrics with a lightweight dashboard
* **View Kafka Brokers** — view topic and partition assignments, controller status
* **View Kafka Topics** — view partition count, replication status, and custom configuration
* **View Consumer Groups** — view per-partition parked offsets, combined and per-partition lag
* **Browse Messages** — browse messages with JSON, plain text, and Avro encoding
* **Dynamic Topic Configuration** — create and configure new topics with dynamic configuration
* **Configurable Authentication** — [secure](https://docs.kafka-ui.provectus.io/configuration/authentication) your installation with optional GitHub/GitLab/Google OAuth 2.0
* **Custom serialization/deserialization plugins** — [use](https://docs.kafka-ui.provectus.io/configuration/serialization-serde) a ready-to-go serde for your data like AWS Glue or Smile, or code your own!
* **Role-based access control** — [manage permissions](https://docs.kafka-ui.provectus.io/configuration/rbac-role-based-access-control) to access the UI with granular precision
* **Data masking** — [obfuscate](https://docs.kafka-ui.provectus.io/configuration/data-masking) sensitive data in topic messages
# The Interface
UI for Apache Kafka wraps major functions of Apache Kafka with an intuitive user interface.
![Interface](documentation/images/Interface.gif)
## Topics
UI for Apache Kafka makes it easy for you to create topics in your browser in just a few clicks, pasting in your own parameters, and to view the topics in the list.
![Create Topic](documentation/images/Create_topic_kafka-ui.gif)
It's possible to jump from the connectors view to the corresponding topics, and from a topic to its consumers (back and forth), for more convenient navigation.
![Connector_Topic_Consumer](documentation/images/Connector_Topic_Consumer.gif)
### Messages
Let's say we want to produce messages for our topic. With the UI for Apache Kafka we can send or write data/messages to the Kafka topics without effort by specifying parameters, and viewing messages in the list.
![Produce Message](documentation/images/Create_message_kafka-ui.gif)
## Schema registry
There are 3 supported types of schemas: Avro®, JSON Schema, and Protobuf schemas.
![Create Schema Registry](documentation/images/Create_schema.gif)
Before producing avro/protobuf encoded messages, you have to add a schema for the topic in Schema Registry. Now all these steps are easy to do
with a few clicks in a user-friendly interface.
![Avro Schema Topic](documentation/images/Schema_Topic.gif)
# Getting Started
To run UI for Apache Kafka, you can use either a pre-built Docker image or build it (or a jar file) yourself.
## Quick start (Demo run)
```
docker run -it -p 8080:8080 -e DYNAMIC_CONFIG_ENABLED=true provectuslabs/kafka-ui
```
Then access the web UI at [http://localhost:8080](http://localhost:8080)
This command is sufficient to try things out. When you're done, you can proceed with a [persistent installation](https://docs.kafka-ui.provectus.io/quick-start/persistent-start)
## Persistent installation
```
services:
kafka-ui:
container_name: kafka-ui
image: provectuslabs/kafka-ui:latest
ports:
- 8080:8080
environment:
DYNAMIC_CONFIG_ENABLED: 'true'
volumes:
- ~/kui/config.yml:/etc/kafkaui/dynamic_config.yaml
```
Please refer to our [configuration](https://docs.kafka-ui.provectus.io/configuration/quick-start) page to proceed with further app configuration.
## Some useful configuration related links
[Web UI Cluster Configuration Wizard](https://docs.kafka-ui.provectus.io/configuration/configuration-wizard)
[Configuration file explanation](https://docs.kafka-ui.provectus.io/configuration/configuration-file)
[Docker Compose examples](https://docs.kafka-ui.provectus.io/configuration/compose-examples)
[Misc configuration properties](https://docs.kafka-ui.provectus.io/configuration/misc-configuration-properties)
## Helm charts
[Quick start](https://docs.kafka-ui.provectus.io/configuration/helm-charts/quick-start)
## Building from sources
[Quick start](https://docs.kafka-ui.provectus.io/development/building/prerequisites) with building
## Liveness and readiness probes
Liveness and readiness endpoint is at `/actuator/health`.<br/>
Info endpoint (build info) is located at `/actuator/info`.
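A quick way to verify both endpoints against a locally running instance (assuming the default port 8080):
```
# liveness/readiness status
curl -s http://localhost:8080/actuator/health
# build info
curl -s http://localhost:8080/actuator/info
```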
# Configuration options
All of the environment variables/config properties can be found [here](https://docs.kafka-ui.provectus.io/configuration/misc-configuration-properties).
# Contributing
Please refer to the [contributing guide](https://docs.kafka-ui.provectus.io/development/contributing); we'll guide you from there.

SECURITY.md Normal file

@ -0,0 +1,21 @@
# Security Policy
## Supported Versions
The following versions of the project are currently supported with security updates.
| Version | Supported |
| ------- | ------------------ |
| 0.7.x | :white_check_mark: |
| 0.6.x | :x: |
| 0.5.x | :x: |
| 0.4.x | :x: |
| 0.3.x | :x: |
| 0.2.x | :x: |
| 0.1.x | :x: |
## Reporting a Vulnerability
Please **DO NOT** file publicly available GitHub issues regarding security vulnerabilities.
Send us the details via email (maintainers.kafka-ui "at" provectus.com).
Consider adding something like "security vulnerability report" to the subject of the email.


@ -1,58 +0,0 @@
# Table of contents
## 🎓 Overview
* [About](README.md)
* [Features](overview/features.md)
* [Getting started](overview/getting-started.md)
## 🛣 Project
* [Code of Conduct](project/code-of-conduct.md)
* [Roadmap](project/roadmap.md)
## 🛠 Development
* [Contributing](development/contributing.md)
* [Setting up git](development/setting-up-git.md)
* [Building](development/building/README.md)
* [Prerequisites](development/building/prerequisites.md)
* [With Docker](development/building/with-docker.md)
* [Without Docker](development/building/without-docker.md)
* [WIP: Testing](development/wip-testing.md)
## 👷‍♂️ Configuration
* [Quick Start](configuration/quick-start/README.md)
* [via AWS Marketplace](configuration/quick-start/via-aws-marketplace.md)
* [Configuration wizard](configuration/configuration-wizard.md)
* [Configuration file](configuration/configuration-file.md)
* [Compose examples](configuration/compose-examples.md)
* [Helm charts](configuration/helm-charts/README.md)
* [Quick start](configuration/helm-charts/quick-start.md)
* [Configuration](configuration/helm-charts/configuration.md)
* [Resource limits](configuration/helm-charts/resource-limits.md)
* [Misc configuration properties](configuration/misc-configuration-properties.md)
* [Required Kafka Permissions](configuration/permissions/README.md)
* [Required Kafka ACLs](configuration/configuration/required-acls.md)
* [MSK (+Serverless) Setup](configuration/permissions/msk-+serverless-setup.md)
* [Complex configuration examples](configuration/configuration/complex-configuration-examples/README.md)
* [Kraft mode + multiple brokers](configuration/configuration/complex-configuration-examples/kraft-mode-+-multiple-brokers.md)
* [SSL](configuration/ssl.md)
* [Authentication](configuration/authentication/README.md)
* [Basic Authentication](configuration/authentication/basic-authentication.md)
* [OAuth2](configuration/authentication/oauth2.md)
* [AWS IAM](configuration/authentication/aws-iam.md)
* [LDAP / Active Directory](configuration/authentication/ldap-active-directory.md)
* [SSO Guide](configuration/authentication/sso-guide.md)
* [SASL\_SCRAM](configuration/authentication/sasl\_scram.md)
* [RBAC (Role based access control)](configuration/rbac-role-based-access-control/README.md)
* [Supported Identity Providers](configuration/rbac-role-based-access-control/supported-identity-providers.md)
* [Data masking](configuration/data-masking.md)
* [Serialization / SerDe](configuration/serialization-serde.md)
* [OpenDataDiscovery Integration](configuration/opendatadiscovery-integration.md)
## ❓ FAQ
* [Common problems](faq/common-problems.md)
* [FAQ](faq/faq.md)


@ -1,2 +0,0 @@
# Authentication


@ -1,45 +0,0 @@
---
description: How to configure AWS IAM Authentication
---
# AWS IAM
UI for Apache Kafka comes with the built-in [aws-msk-iam-auth](https://github.com/aws/aws-msk-iam-auth) library.
You can pass SASL configs in the properties section for each cluster.
More details can be found here: [aws-msk-iam-auth](https://github.com/aws/aws-msk-iam-auth)
### Examples:
Please replace
* \<KAFKA\_URL> with broker list
* \<PROFILE\_NAME> with your aws profile
#### Running From Docker Image
```
docker run -p 8080:8080 \
-e KAFKA_CLUSTERS_0_NAME=local \
-e KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=<KAFKA_URL> \
-e KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL=SASL_SSL \
-e KAFKA_CLUSTERS_0_PROPERTIES_SASL_MECHANISM=AWS_MSK_IAM \
-e KAFKA_CLUSTERS_0_PROPERTIES_SASL_CLIENT_CALLBACK_HANDLER_CLASS=software.amazon.msk.auth.iam.IAMClientCallbackHandler \
-e 'KAFKA_CLUSTERS_0_PROPERTIES_SASL_JAAS_CONFIG=software.amazon.msk.auth.iam.IAMLoginModule required awsProfileName="<PROFILE_NAME>";' \
-d provectuslabs/kafka-ui:latest
```
#### Configuring by application.yaml
```yaml
kafka:
clusters:
- name: local
bootstrapServers: <KAFKA_URL>
properties:
security.protocol: SASL_SSL
sasl.mechanism: AWS_MSK_IAM
sasl.client.callback.handler.class: software.amazon.msk.auth.iam.IAMClientCallbackHandler
sasl.jaas.config: software.amazon.msk.auth.iam.IAMLoginModule required awsProfileName="<PROFILE_NAME>";
```


@ -1,15 +0,0 @@
---
description: Basic username+password authentication
---
# Basic Authentication
In order to enable basic username+password authentication, add these properties:
```
AUTH_TYPE: "LOGIN_FORM"
SPRING_SECURITY_USER_NAME: admin
SPRING_SECURITY_USER_PASSWORD: pass
```
Please note that basic auth is not compatible with any other auth method or with RBAC.
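For a quick local try-out, the same properties can be passed as environment variables to the Docker image. A minimal sketch, assuming a cluster named `local` (replace `<KAFKA_URL>` with your broker list):
```
docker run -p 8080:8080 \
  -e KAFKA_CLUSTERS_0_NAME=local \
  -e KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=<KAFKA_URL> \
  -e AUTH_TYPE=LOGIN_FORM \
  -e SPRING_SECURITY_USER_NAME=admin \
  -e SPRING_SECURITY_USER_PASSWORD=pass \
  -d provectuslabs/kafka-ui:latest
```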


@ -1,20 +0,0 @@
# LDAP / Active Directory
```
auth:
type: LDAP
spring:
ldap:
urls: ldap://localhost:10389
base: "cn={0},ou=people,dc=planetexpress,dc=com"
admin-user: "cn=admin,dc=planetexpress,dc=com"
admin-password: "GoodNewsEveryone"
user-filter-search-base: "dc=planetexpress,dc=com"
user-filter-search-filter: "(&(uid={0})(objectClass=inetOrgPerson))"
group-filter-search-base: "ou=people,dc=planetexpress,dc=com" # required for RBAC
oauth2:
ldap:
    activeDirectory: false
    activeDirectory.domain: memelord.lol # only relevant when activeDirectory is set to true
```


@ -1,162 +0,0 @@
---
description: Examples of setups for different OAuth providers
---
# OAuth2
In general, the structure of the config looks like this:
For specific providers, like GitHub (non-enterprise) and Google (see further below), you don't have to specify URLs, as they're [well-known](https://github.com/spring-projects/spring-security/blob/main/config/src/main/java/org/springframework/security/config/oauth2/client/CommonOAuth2Provider.java#L35).
```
auth:
type: OAUTH2
oauth2:
client:
<unique_name>:
clientId: xxx
clientSecret: yyy
scope: openid
client-name: cognito # will be displayed on the login page
provider: <provider>
redirect-uri: http://localhost:8080/login/oauth2/code/<provider>
authorization-grant-type: authorization_code
issuer-uri: https://xxx
jwk-set-uri: https://yyy/.well-known/jwks.json
user-name-attribute: <zzz>
custom-params:
          type: <provider_type> # fill this in if you're going to use RBAC AND the type is one of the supported RBAC providers
roles-field: groups # required for RBAC, a field name in OAuth token which will contain user's roles/groups
```
### Cognito
```yaml
kafka:
clusters:
- name: local
bootstrapServers: localhost:9092
# ...
auth:
type: OAUTH2
oauth2:
client:
cognito:
clientId: xxx
clientSecret: yyy
scope: openid
client-name: cognito
provider: cognito
redirect-uri: http://localhost:8080/login/oauth2/code/cognito
authorization-grant-type: authorization_code
issuer-uri: https://cognito-idp.eu-central-1.amazonaws.com/eu-central-1_xxx
jwk-set-uri: https://cognito-idp.eu-central-1.amazonaws.com/eu-central-1_xxx/.well-known/jwks.json
user-name-attribute: cognito:username
custom-params:
type: cognito
          logoutUrl: https://<XXX>.eu-central-1.amazoncognito.com/logout # required just for cognito
```
### Google
```yaml
kafka:
clusters:
- name: local
bootstrapServers: localhost:9092
# ...
auth:
type: OAUTH2
oauth2:
client:
google:
provider: google
clientId: xxx.apps.googleusercontent.com
clientSecret: GOCSPX-xxx
user-name-attribute: email
custom-params:
type: google
allowedDomain: provectus.com # for RBAC
```
### GitHub
Example of a callback URL for GitHub OAuth app settings:
`https://www.kafka-ui.provectus.io/login/oauth2/code/github`
For a self-hosted installation, the relevant properties can be found a bit further below.
```yaml
kafka:
clusters:
- name: local
bootstrapServers: localhost:9092
# ...
auth:
type: OAUTH2
oauth2:
client:
github:
provider: github
clientId: xxx
clientSecret: yyy
scope: read:org
user-name-attribute: login
custom-params:
type: github
```
#### Self-hosted/Cloud (GitHub Enterprise Server)
Replace `HOSTNAME` with your self-hosted platform's FQDN.
```yaml
kafka:
clusters:
- name: local
bootstrapServers: localhost:9092
# ...
auth:
type: OAUTH2
oauth2:
client:
github:
provider: github
clientId: xxx
clientSecret: yyy
scope: read:org
user-name-attribute: login
authorization-uri: http(s)://HOSTNAME/login/oauth/authorize
token-uri: http(s)://HOSTNAME/login/oauth/access_token
user-info-uri: http(s)://HOSTNAME/api/v3/user
custom-params:
type: github
```
### Okta
```yaml
auth:
type: OAUTH2
oauth2:
client:
okta:
clientId: xxx
clientSecret: yyy
scope: [ 'openid', 'profile', 'email', 'groups' ] # default for okta + groups for rbac
client-name: Okta
provider: okta
redirect-uri: http://localhost:8080/login/oauth2/code/okta
authorization-grant-type: authorization_code
issuer-uri: https://<okta_domain>.okta.com
jwk-set-uri: https://yyy/.well-known/jwks.json
user-name-attribute: sub # default for okta, "email" also available
custom-params:
type: oauth
roles-field: groups # required for RBAC
```


@ -1,63 +0,0 @@
---
description: How to configure SASL SCRAM Authentication
---
# SASL\_SCRAM
You can pass SASL configs in the properties section for each cluster.
### Examples:
Please replace
* \<KAFKA\_NAME> with cluster name
* \<KAFKA\_URL> with broker list
* \<KAFKA\_USERNAME> with username
* \<KAFKA\_PASSWORD> with password
#### Running From Docker Image
```
docker run -p 8080:8080 \
-e KAFKA_CLUSTERS_0_NAME=<KAFKA_NAME> \
-e KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=<KAFKA_URL> \
-e KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL=SASL_SSL \
-e KAFKA_CLUSTERS_0_PROPERTIES_SASL_MECHANISM=SCRAM-SHA-512 \
-e 'KAFKA_CLUSTERS_0_PROPERTIES_SASL_JAAS_CONFIG=org.apache.kafka.common.security.scram.ScramLoginModule required username="<KAFKA_USERNAME>" password="<KAFKA_PASSWORD>";' \
-d provectuslabs/kafka-ui:latest
```
#### Running From Docker-compose file
```yaml
version: '3.4'
services:
kafka-ui:
image: provectuslabs/kafka-ui
container_name: kafka-ui
ports:
- "888:8080"
restart: always
environment:
- KAFKA_CLUSTERS_0_NAME=<KAFKA_NAME>
- KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=<KAFKA_URL>
- KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL=SASL_SSL
- KAFKA_CLUSTERS_0_PROPERTIES_SASL_MECHANISM=SCRAM-SHA-512
- KAFKA_CLUSTERS_0_PROPERTIES_SASL_JAAS_CONFIG=org.apache.kafka.common.security.scram.ScramLoginModule required username="<KAFKA_USERNAME>" password="<KAFKA_PASSWORD>";
- KAFKA_CLUSTERS_0_PROPERTIES_PROTOCOL=SASL
```
#### Configuring by application.yaml
```yaml
kafka:
clusters:
- name: local
bootstrapServers: <KAFKA_URL>
properties:
security.protocol: SASL_SSL
sasl.mechanism: SCRAM-SHA-512
sasl.jaas.config: org.apache.kafka.common.security.scram.ScramLoginModule required username="<KAFKA_USERNAME>" password="<KAFKA_PASSWORD>";
```


@ -1,85 +0,0 @@
# SSO Guide
## How to configure SSO
SSO additionally requires TLS to be configured for the application. In this example we will use a self-signed certificate; in case you use proper (CA-signed) certificates, please skip step 1.
### Step 1
At this step we will generate a self-signed PKCS12 keypair.
```bash
mkdir cert
keytool -genkeypair -alias ui-for-apache-kafka -keyalg RSA -keysize 2048 \
-storetype PKCS12 -keystore cert/ui-for-apache-kafka.p12 -validity 3650
```
### Step 2
Create a new application in any SSO provider; we will continue with [Auth0](https://auth0.com).
![](https://user-images.githubusercontent.com/1494347/172255269-94cb9e3a-042b-49bb-925e-a06344840662.png)
After that you need to provide callback URLs; in our case we will use `https://127.0.0.1:8080/login/oauth2/code/auth0`
![](https://user-images.githubusercontent.com/1494347/172255294-86af29b9-642b-4fb5-9ba8-212185e3fdfc.png)
These are the main parameters required for enabling SSO:
![](https://user-images.githubusercontent.com/1494347/172255315-4f12ac92-ca13-4206-ab68-48092e562092.png)
### Step 3
To launch UI for Apache Kafka with TLS and SSO enabled, run the following:
```bash
docker run -p 8080:8080 -v `pwd`/cert:/opt/cert -e AUTH_TYPE=LOGIN_FORM \
-e SECURITY_BASIC_ENABLED=true \
-e SERVER_SSL_KEY_STORE_TYPE=PKCS12 \
-e SERVER_SSL_KEY_STORE=/opt/cert/ui-for-apache-kafka.p12 \
-e SERVER_SSL_KEY_STORE_PASSWORD=123456 \
-e SERVER_SSL_KEY_ALIAS=ui-for-apache-kafka \
-e SERVER_SSL_ENABLED=true \
-e SPRING_SECURITY_OAUTH2_CLIENT_REGISTRATION_AUTH0_CLIENTID=uhvaPKIHU4ZF8Ne4B6PGvF0hWW6OcUSB \
-e SPRING_SECURITY_OAUTH2_CLIENT_REGISTRATION_AUTH0_CLIENTSECRET=YXfRjmodifiedTujnkVr7zuW9ECCAK4TcnCio-i \
-e SPRING_SECURITY_OAUTH2_CLIENT_PROVIDER_AUTH0_ISSUER_URI=https://dev-a63ggcut.auth0.com/ \
-e SPRING_SECURITY_OAUTH2_CLIENT_REGISTRATION_AUTH0_SCOPE=openid \
-e TRUST_STORE=/opt/cert/ui-for-apache-kafka.p12 \
-e TRUST_STORE_PASSWORD=123456 \
provectuslabs/kafka-ui:latest
```
In the case of a trusted CA-signed SSL certificate and SSL termination somewhere outside of the application, we can pass only the SSO-related environment variables:
```bash
docker run -p 8080:8080 -v `pwd`/cert:/opt/cert -e AUTH_TYPE=OAUTH2 \
-e SPRING_SECURITY_OAUTH2_CLIENT_REGISTRATION_AUTH0_CLIENTID=uhvaPKIHU4ZF8Ne4B6PGvF0hWW6OcUSB \
-e SPRING_SECURITY_OAUTH2_CLIENT_REGISTRATION_AUTH0_CLIENTSECRET=YXfRjmodifiedTujnkVr7zuW9ECCAK4TcnCio-i \
-e SPRING_SECURITY_OAUTH2_CLIENT_PROVIDER_AUTH0_ISSUER_URI=https://dev-a63ggcut.auth0.com/ \
-e SPRING_SECURITY_OAUTH2_CLIENT_REGISTRATION_AUTH0_SCOPE=openid \
provectuslabs/kafka-ui:latest
```
### Step 4 (Load Balancer HTTP) (optional)
If you're using a load balancer/proxy with HTTP between the proxy and the app, you might want to set `server.forward-headers-strategy` to `native` as well (`SERVER_FORWARDHEADERSSTRATEGY=native`); for more info refer to [this issue](https://github.com/provectus/kafka-ui/issues/1017).
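The same setting in YAML form (a minimal sketch; this is the standard Spring Boot property spelled out for illustration):
```yaml
server:
  forward-headers-strategy: native
```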
### Step 5 (Azure) (optional)
For Azure AD (Office365) OAUTH2 you'll want to add additional environment variables:
```bash
docker run -p 8080:8080 \
-e KAFKA_CLUSTERS_0_NAME="${cluster_name}" \
-e KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS="${kafka_listeners}" \
-e KAFKA_CLUSTERS_0_ZOOKEEPER="${zookeeper_servers}" \
-e KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS="${kafka_connect_servers}" \
-e AUTH_TYPE=OAUTH2 \
-e SPRING_SECURITY_OAUTH2_CLIENT_REGISTRATION_AUTH0_CLIENTID=uhvaPKIHU4ZF8Ne4B6PGvF0hWW6OcUSB \
-e SPRING_SECURITY_OAUTH2_CLIENT_REGISTRATION_AUTH0_CLIENTSECRET=YXfRjmodifiedTujnkVr7zuW9ECCAK4TcnCio-i \
-e SPRING_SECURITY_OAUTH2_CLIENT_REGISTRATION_AUTH0_SCOPE="https://graph.microsoft.com/User.Read" \
-e SPRING_SECURITY_OAUTH2_CLIENT_PROVIDER_AUTH0_ISSUER_URI="https://login.microsoftonline.com/{tenant-id}/v2.0" \
-d provectuslabs/kafka-ui:latest
```
Note that the scope is created by default when the application registration is done in the Azure portal. You'll need to update the application registration manifest to include `"accessTokenAcceptedVersion": 2`, as sketched below.
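A minimal illustration of the manifest fragment (only the field mentioned above is shown; the rest of the manifest is omitted):
```json
{
  "accessTokenAcceptedVersion": 2
}
```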

View file

@ -1,7 +0,0 @@
---
description: A list of ready-to-go docker compose files for various setup scenarios
---
# Compose examples
[https://github.com/provectus/kafka-ui/blob/master/documentation/compose/DOCKER\_COMPOSE.md](https://github.com/provectus/kafka-ui/blob/master/documentation/compose/DOCKER\_COMPOSE.md)

View file

@ -1,66 +0,0 @@
---
description: This page explains configuration file structure
---
# Configuration file
Let's start with the fact that there are two possible ways to configure the app; they are interchangeable and can even supplement each other.
The two ways are YAML config & env variables config. We **strongly** recommend using YAML over env variables for most of the config. You can use env vars to override the default config in different environments.
[This tool](https://env.simplestep.ca/) can help you translate your config back and forth from YAML to env vars.
We will mostly provide examples of configs in YAML format, but sometimes single properties might be written in the form of env variables.
Rather than writing your config from scratch, it would be more convenient to use one of the ready-to-go [compose examples](compose-examples.md) and adjust it to your needs.
#### Providing a config path for the app instance:
**Docker**: `docker run -it -p 8080:8080 -e spring.config.additional-location=/tmp/config.yml -v /tmp/kui/config.yml:/tmp/config.yml provectuslabs/kafka-ui`
**Docker compose**:
```
services:
kafka-ui:
container_name: kafka-ui
image: provectuslabs/kafka-ui:latest
environment:
KAFKA_CLUSTERS_0_NAME: local
# other properties, omitted
SPRING_CONFIG_ADDITIONAL-LOCATION: /config.yml
volumes:
- /tmp/config.yml:/config.yml
```
**Jar**: `java -Dspring.config.additional-location=<path-to-application-local.yml> -jar <path-to-jar>.jar`
#### Basic config structure
```yaml
kafka:
clusters:
-
name: local
bootstrapServers: localhost:29091
schemaRegistry: http://localhost:8085
schemaRegistryAuth:
username: username
password: password
# schemaNameTemplate: "%s-value"
metrics:
port: 9997
type: JMX
```
* `name`: cluster name
* `bootstrapServers`: where to connect
* `schemaRegistry`: schemaRegistry's address
* `schemaRegistryAuth.username`: schemaRegistry's basic authentication username
* `schemaRegistryAuth.password`: schemaRegistry's basic authentication password
* `schemaNameTemplate`: how keys are saved to Schema Registry
* `metrics.port`: open the JMX port of a broker
* `metrics.type`: type of metrics, either JMX or PROMETHEUS. Defaults to JMX.
* `readOnly`: enable read-only mode
Configure as many clusters as you need by adding their configs below, separated with `-`, as in the sketch that follows.
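For example, a two-cluster config might look like this (a minimal sketch; the second cluster's name and address are illustrative):
```yaml
kafka:
  clusters:
    - name: local
      bootstrapServers: localhost:29091
    - name: staging
      bootstrapServers: staging-kafka:9092 # hypothetical second cluster
```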

View file

@ -1,53 +0,0 @@
# Configuration wizard
## Dynamic application configuration
![](../.gitbook/assets/image.png)![](<../.gitbook/assets/image (1).png>)
By default, kafka-ui does not allow changing its configuration at runtime. When the application is started it reads configuration from system env, config files (e.g. application.yaml), and JVM arguments (set by `-D`). Once the configuration is read, it is treated as immutable and won't be refreshed even if the config source (e.g. a file) changes.
Since version 0.6 we added the ability to change cluster configs at runtime. This option is disabled by default and has to be explicitly enabled. To enable it, set the `DYNAMIC_CONFIG_ENABLED` env property to `true` or add the `dynamic.config.enabled: true` property to your yaml config file.
Sample docker compose configuration:
```
services:
kafka-ui:
container_name: kafka-ui
image: provectuslabs/kafka-ui:latest
ports:
- 8080:8080
depends_on:
- kafka0
environment:
DYNAMIC_CONFIG_ENABLED: 'true'
KAFKA_CLUSTERS_0_NAME: wizard_test
KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
...
```
You can even omit all vars other than `DYNAMIC_CONFIG_ENABLED` to start the application with empty configs and set it up after startup, as sketched below.
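A minimal way to do that (this is the same demo-run command as in the Quick Start guide):
```
docker run -it -p 8080:8080 -e DYNAMIC_CONFIG_ENABLED=true provectuslabs/kafka-ui
```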
When the dynamic config feature is enabled you will see additional buttons that will take you to the "Wizard" for editing an existing cluster configuration or adding new clusters.
#### Dynamic config files
Kafka-ui is a stateless application by its nature, so when you edit the configuration at runtime, it stores the configuration additions on the container's filesystem (in the `dynamic_config.yaml` file). The dynamic config file is overwritten on each configuration submission.
During the configuration process, you can also upload configuration-related files (like truststores and keystores). They will be stored in the `/etc/kafkaui/uploads` folder with a unique timestamp suffix to prevent name collisions. In the wizard, you can also use files that were mounted to the container's filesystem, without uploading them directly.
**Note** that if the container is recreated, your edited (and uploaded) files won't be present and the app will start with the static configuration only. If you want to keep the configuration created by the wizard, you have to mount/copy the same files into newly created kafka-ui containers (the whole `/etc/kafkaui/` folder, by default); see the sketch after the table below.
Properties specifying where dynamic config files will be persisted:
| Env variable name | Yaml property | Default | Description |
| ---------------------------- | ---------------------------- | ---------------------------------- | ---------------------------------------- |
| `DYNAMIC_CONFIG_PATH` | `dynamic.config.path` | `/etc/kafkaui/dynamic_config.yaml` | Path to dynamic config file |
| `CONFIG_RELATED_UPLOADS_DIR` | `config.related.uploads.dir` | `/etc/kafkaui/uploads` | Path where uploaded files will be placed |
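For example, to keep wizard-created configuration across container recreations, you can mount the whole config folder as a volume (a minimal sketch using the default paths from the table above; the host directory is illustrative):
```yaml
services:
  kafka-ui:
    container_name: kafka-ui
    image: provectuslabs/kafka-ui:latest
    ports:
      - 8080:8080
    environment:
      DYNAMIC_CONFIG_ENABLED: 'true'
    volumes:
      - ./kui-config:/etc/kafkaui # hypothetical host folder
```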
#### Implementation notes:
Currently, a new configuration submission leads to a full application restart. So, if your kafka-ui app starts slowly (not a usual case, but it may happen when you have a slow connection to the kafka clusters) you may notice UI inaccessibility during the restart.

View file

@ -1,2 +0,0 @@
# Complex configuration examples

View file

@ -1,22 +0,0 @@
---
description: ACLs required to run the app
---
# Required Kafka ACLs
## ACLs for standalone kafka
This list is enough to run the app in r/o mode
```
Permission | Operation | ResourceType | ResourceName | PatternType
------------+------------------+--------------+---------------+--------------
ALLOW | READ | TOPIC | * | LITERAL
ALLOW | DESCRIBE_CONFIGS | TOPIC | * | LITERAL
ALLOW | DESCRIBE | GROUP | * | LITERAL
ALLOW | DESCRIBE | CLUSTER | kafka-cluster | LITERAL
ALLOW | DESCRIBE_CONFIGS | CLUSTER | kafka-cluster | LITERAL
```

View file

@ -1,136 +0,0 @@
# Data masking
## Topics data masking
You can configure kafka-ui to mask sensitive data shown on the Messages page.
Several masking policies are supported:
#### REMOVE
For JSON objects, removes target fields; otherwise returns the "null" string.
```yaml
- type: REMOVE
fields: [ "id", "name" ]
...
```
Apply examples:
```
{ "id": 1234, "name": { "first": "James" }, "age": 30 }
->
{ "age": 30 }
```
```
non-json string -> null
```
#### REPLACE
For JSON objects, replaces the target fields' values with the specified replacement string (by default with `***DATA_MASKED***`). Note: if a target field's value is an object, the replacement is applied to all its fields recursively (see example).
```yaml
- type: REPLACE
fields: [ "id", "name" ]
replacement: "***" #optional, "***DATA_MASKED***" by default
...
```
Apply examples:
```
{ "id": 1234, "name": { "first": "James", "last": "Bond" }, "age": 30 }
->
{ "id": "***", "name": { "first": "***", "last": "***" }, "age": 30 }
```
```
non-json string -> ***
```
#### MASK
Masks the target fields' values with the specified masking characters, recursively (spaces and line separators are kept as-is). The `pattern` array specifies which symbols will be used to replace upper-case chars, lower-case chars, digits, and other symbols respectively.
```yaml
- type: MASK
fields: [ "id", "name" ]
pattern: ["A", "a", "N", "_"] # optional, default is ["X", "x", "n", "-"]
...
```
Apply examples:
```
{ "id": 1234, "name": { "first": "James", "last": "Bond!" }, "age": 30 }
->
{ "id": "NNNN", "name": { "first": "Aaaaa", "last": "Aaaa_" }, "age": 30 }
```
```
Some string! -> Aaaa aaaaaa_
```
***
For each policy, if `fields` is not specified, the policy will be applied to all of the object's fields, or to the whole string if it is not a JSON object.
You can specify which masks will be applied to a topic's keys/values. Multiple policies will be applied if a topic matches several policies' patterns.
Yaml configuration example:
```yaml
kafka:
clusters:
- name: ClusterName
# Other Cluster configuration omitted ...
masking:
- type: REMOVE
fields: [ "id" ]
topicKeysPattern: "events-with-ids-.*"
topicValuesPattern: "events-with-ids-.*"
- type: REPLACE
fields: [ "companyName", "organizationName" ]
replacement: "***MASKED_ORG_NAME***" #optional
topicValuesPattern: "org-events-.*"
- type: MASK
fields: [ "name", "surname" ]
pattern: ["A", "a", "N", "_"] #optional
topicValuesPattern: "user-states"
- type: MASK
topicValuesPattern: "very-secured-topic"
```
Same configuration in env-vars fashion:
```
...
KAFKA_CLUSTERS_0_MASKING_0_TYPE: REMOVE
KAFKA_CLUSTERS_0_MASKING_0_FIELDS_0: "id"
KAFKA_CLUSTERS_0_MASKING_0_TOPICKEYSPATTERN: "events-with-ids-.*"
KAFKA_CLUSTERS_0_MASKING_0_TOPICVALUESPATTERN: "events-with-ids-.*"
KAFKA_CLUSTERS_0_MASKING_1_TYPE: REPLACE
KAFKA_CLUSTERS_0_MASKING_1_FIELDS_0: "companyName"
KAFKA_CLUSTERS_0_MASKING_1_FIELDS_1: "organizationName"
KAFKA_CLUSTERS_0_MASKING_1_REPLACEMENT: "***MASKED_ORG_NAME***"
KAFKA_CLUSTERS_0_MASKING_1_TOPICVALUESPATTERN: "org-events-.*"
KAFKA_CLUSTERS_0_MASKING_2_TYPE: MASK
KAFKA_CLUSTERS_0_MASKING_2_FIELDS_0: "name"
KAFKA_CLUSTERS_0_MASKING_2_FIELDS_1: "surname"
KAFKA_CLUSTERS_0_MASKING_2_PATTERN_0: 'A'
KAFKA_CLUSTERS_0_MASKING_2_PATTERN_1: 'a'
KAFKA_CLUSTERS_0_MASKING_2_PATTERN_2: 'N'
KAFKA_CLUSTERS_0_MASKING_2_PATTERN_3: '_'
KAFKA_CLUSTERS_0_MASKING_2_TOPICVALUESPATTERN: "user-states"
KAFKA_CLUSTERS_0_MASKING_3_TYPE: MASK
KAFKA_CLUSTERS_0_MASKING_3_TOPICVALUESPATTERN: "very-secured-topic"
```

View file

@ -1,3 +0,0 @@
# Helm charts
UI for Apache Kafka is also available as a helm chart. See the underlying articles to learn more about it.

View file

@ -1,40 +0,0 @@
# Configuration
Most of the Helm chart parameters are common; the following table describes the unique parameters related to application configuration.
#### Kafka-UI parameters
| Parameter | Description | Default |
| ----------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- | ----------- |
| `existingConfigMap` | Name of the existing ConfigMap with Kafka-UI environment variables | `nil` |
| `existingSecret` | Name of the existing Secret with Kafka-UI environment variables | `nil` |
| `envs.secret` | Set of the sensitive environment variables to pass to Kafka-UI | `{}` |
| `envs.config` | Set of the environment variables to pass to Kafka-UI | `{}` |
| `yamlApplicationConfigConfigMap` | Map with name and keyName keys, name refers to the existing ConfigMap, keyName refers to the ConfigMap key with Kafka-UI config in Yaml format | `{}` |
| `yamlApplicationConfig` | Kafka-UI config in Yaml format | `{}` |
| `networkPolicy.enabled` | Enable network policies | `false` |
| `networkPolicy.egressRules.customRules` | Custom network egress policy rules | `[]` |
| `networkPolicy.ingressRules.customRules` | Custom network ingress policy rules | `[]` |
| `podLabels` | Extra labels for Kafka-UI pod | `{}` |
| `route.enabled` | Enable OpenShift route to expose the Kafka-UI service | `false` |
| `route.annotations` | Add annotations to the OpenShift route | `{}` |
| `route.tls.enabled` | Enable OpenShift route as a secured endpoint | `false` |
| `route.tls.termination` | Set OpenShift Route TLS termination | `edge` |
| `route.tls.insecureEdgeTerminationPolicy` | Set OpenShift Route Insecure Edge Termination Policy | `Redirect` |
### Example
To install Kafka-UI, execute the following:
```bash
helm repo add kafka-ui https://provectus.github.io/kafka-ui
helm install kafka-ui kafka-ui/kafka-ui --set envs.config.KAFKA_CLUSTERS_0_NAME=local --set envs.config.KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=kafka:9092
```
To connect to the Kafka-UI web application, execute:
```bash
kubectl port-forward svc/kafka-ui 8080:80
```
Open `http://127.0.0.1:8080` in the browser to access Kafka-UI.

View file

@ -1,86 +0,0 @@
---
description: Quick Start with Helm Chart
---
# Quick start
## General
1. Clone/Copy Chart to your working directory
2. Execute command
```
helm repo add kafka-ui https://provectus.github.io/kafka-ui-charts
helm install kafka-ui kafka-ui/kafka-ui
```
#### Passing Kafka-UI configuration as Dict
Create a `values.yml` file:
```
yamlApplicationConfig:
kafka:
clusters:
- name: yaml
bootstrapServers: kafka-cluster-broker-endpoints:9092
auth:
type: disabled
management:
health:
ldap:
enabled: false
```
Install by executing the command
> helm install helm-release-name charts/kafka-ui -f values.yml
#### Passing configuration file as ConfigMap
Create a config map:
```
apiVersion: v1
kind: ConfigMap
metadata:
name: kafka-ui-configmap
data:
config.yml: |-
kafka:
clusters:
- name: yaml
bootstrapServers: kafka-cluster-broker-endpoints:9092
auth:
type: disabled
management:
health:
ldap:
enabled: false
```
This ConfigMap will be mounted to the Pod
Install by executing the command
> helm install helm-release-name charts/kafka-ui --set yamlApplicationConfigConfigMap.name="kafka-ui-configmap",yamlApplicationConfigConfigMap.keyName="config.yml"
#### Passing environment variables as ConfigMap
Create a config map:
```
apiVersion: v1
kind: ConfigMap
metadata:
name: kafka-ui-helm-values
data:
KAFKA_CLUSTERS_0_NAME: "kafka-cluster-name"
KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: "kafka-cluster-broker-endpoints:9092"
AUTH_TYPE: "DISABLED"
MANAGEMENT_HEALTH_LDAP_ENABLED: "FALSE"
```
Install by executing the command
> helm install helm-release-name charts/kafka-ui --set existingConfigMap="kafka-ui-helm-values"

View file

@ -1,35 +0,0 @@
---
description: How to set up resource limits
---
# Resource limits
There are two options:
### Set limits via changing values.yaml
To set or change resource limits for pods you need to create the file `values.yaml` and add the following lines:
```
resources:
limits:
cpu: 200m
memory: 512Mi
requests:
cpu: 200m
memory: 256Mi
```
Specify the `values.yaml` file during chart install:
```
helm install kafka-ui kafka-ui/kafka-ui -f values.yaml
```
### Set limits via CLI
To set limits via CLI, specify them with the `helm install` command:
```
helm install kafka-ui kafka-ui/kafka-ui --set resources.limits.cpu=200m --set resources.limits.memory=512Mi --set resources.requests.memory=256Mi --set resources.requests.cpu=200m
```

View file

@ -1,65 +0,0 @@
---
description: Configuration properties for all the things
---
# Misc configuration properties
A reminder: any of these properties can be converted into yaml config properties. An example:
`KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS`
becomes
```
kafka:
clusters:
- bootstrapServers: xxx
```
| Name | Description |
| ----------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `SERVER_SERVLET_CONTEXT_PATH` | URI basePath |
| `LOGGING_LEVEL_ROOT` | Setting log level (trace, debug, info, warn, error). Default: info |
| `LOGGING_LEVEL_COM_PROVECTUS` | Setting log level (trace, debug, info, warn, error). Default: debug |
| `SERVER_PORT` | Port for the embedded server. Default: `8080` |
| `KAFKA_ADMIN-CLIENT-TIMEOUT` | Kafka API timeout in ms. Default: `30000` |
| `KAFKA_CLUSTERS_0_NAME` | Cluster name |
| `KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS` | Address where to connect |
| `KAFKA_CLUSTERS_0_KSQLDBSERVER` | KSQL DB server address |
| `KAFKA_CLUSTERS_0_KSQLDBSERVERAUTH_USERNAME` | KSQL DB server's basic authentication username |
| `KAFKA_CLUSTERS_0_KSQLDBSERVERAUTH_PASSWORD` | KSQL DB server's basic authentication password |
| `KAFKA_CLUSTERS_0_KSQLDBSERVERSSL_KEYSTORELOCATION` | Path to the JKS keystore to communicate to KSQL DB |
| `KAFKA_CLUSTERS_0_KSQLDBSERVERSSL_KEYSTOREPASSWORD` | Password of the JKS keystore for KSQL DB |
| `KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL` | Security protocol to connect to the brokers. For SSL connection use "SSL", for plaintext connection don't set this environment variable |
| `KAFKA_CLUSTERS_0_SCHEMAREGISTRY` | SchemaRegistry's address |
| `KAFKA_CLUSTERS_0_SCHEMAREGISTRYAUTH_USERNAME` | SchemaRegistry's basic authentication username |
| `KAFKA_CLUSTERS_0_SCHEMAREGISTRYAUTH_PASSWORD` | SchemaRegistry's basic authentication password |
| `KAFKA_CLUSTERS_0_SCHEMAREGISTRYSSL_KEYSTORELOCATION` | Path to the JKS keystore to communicate to SchemaRegistry |
| `KAFKA_CLUSTERS_0_SCHEMAREGISTRYSSL_KEYSTOREPASSWORD` | Password of the JKS keystore for SchemaRegistry |
| `KAFKA_CLUSTERS_0_METRICS_SSL` | Enable SSL for Metrics (for PROMETHEUS metrics type). Default: false. |
| `KAFKA_CLUSTERS_0_METRICS_USERNAME` | Username for Metrics authentication |
| `KAFKA_CLUSTERS_0_METRICS_PASSWORD` | Password for Metrics authentication |
| `KAFKA_CLUSTERS_0_METRICS_KEYSTORELOCATION` | Path to the JKS keystore to communicate to metrics source (JMX/PROMETHEUS). For advanced setup, see `kafka-ui-jmx-secured.yml` |
| `KAFKA_CLUSTERS_0_METRICS_KEYSTOREPASSWORD` | Password of the JKS metrics keystore |
| `KAFKA_CLUSTERS_0_SCHEMANAMETEMPLATE` | How keys are saved to schemaRegistry |
| `KAFKA_CLUSTERS_0_METRICS_PORT` | Open metrics port of a broker |
| `KAFKA_CLUSTERS_0_METRICS_TYPE` | Type of metrics retriever to use. Valid values are JMX (default) or PROMETHEUS. If Prometheus, then metrics are read from prometheus-jmx-exporter instead of jmx |
| `KAFKA_CLUSTERS_0_READONLY` | Enable read-only mode. Default: false |
| `KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME` | Given name for the Kafka Connect cluster |
| `KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS` | Address of the Kafka Connect service endpoint |
| `KAFKA_CLUSTERS_0_KAFKACONNECT_0_USERNAME` | Kafka Connect cluster's basic authentication username |
| `KAFKA_CLUSTERS_0_KAFKACONNECT_0_PASSWORD` | Kafka Connect cluster's basic authentication password |
| `KAFKA_CLUSTERS_0_KAFKACONNECT_0_KEYSTORELOCATION` | Path to the JKS keystore to communicate to Kafka Connect |
| `KAFKA_CLUSTERS_0_KAFKACONNECT_0_KEYSTOREPASSWORD` | Password of the JKS keystore for Kafka Connect |
| `KAFKA_CLUSTERS_0_POLLING_THROTTLE_RATE` | Max traffic rate (bytes/sec) that kafka-ui allowed to reach when polling messages from the cluster. Default: 0 (not limited) |
| `KAFKA_CLUSTERS_0_SSL_TRUSTSTORELOCATION` | Path to the JKS truststore to communicate to Kafka Connect, SchemaRegistry, KSQL, Metrics |
| `KAFKA_CLUSTERS_0_SSL_TRUSTSTOREPASSWORD` | Password of the JKS truststore for Kafka Connect, SchemaRegistry, KSQL, Metrics |
| `TOPIC_RECREATE_DELAY_SECONDS` | Time delay between topic deletion and topic creation attempts for topic recreate functionality. Default: 1 |
| `TOPIC_RECREATE_MAXRETRIES` | Number of attempts of topic creation after topic deletion for topic recreate functionality. Default: 15 |
| `DYNAMIC_CONFIG_ENABLED` | Allow to change application config in runtime. Default: false. |
| `kafka_internalTopicPrefix` | Set a prefix for internal topics. Defaults to "_". |
| `kafka.polling.pollTimeoutMs` | Messages: used as the kafka consumer poll time (default is 1000). Useful if your message viewing fails with a slow broker |
| `kafka.polling.partitionPollTimeout` | Messages: used as the consumer poll time when polling one specific partition (used for newest-first mode). The default is 200. Will increase result generation time. |
| `kafka.polling.noDataEmptyPolls` | Messages: the count of subsequent empty polls after which we assume that no more data is left in the topic/partition. The default is 3. |
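In YAML form, the polling knobs above look like this (a minimal sketch; the values shown are the documented defaults):
```yaml
kafka:
  polling:
    pollTimeoutMs: 1000 # consumer poll time for message viewing
    partitionPollTimeout: 200 # poll time for single-partition (newest first) mode
    noDataEmptyPolls: 3 # empty polls before assuming no more data
```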

View file

@ -1,20 +0,0 @@
# OpenDataDiscovery Integration
Kafka-ui has integration with the [OpenDataDiscovery platform](https://opendatadiscovery.org/) (ODD).
ODD Platform allows you to monitor and navigate kafka data streams and see how they embed into your data platform.
This integration allows you to use kafka-ui as an ODD "Collector" for kafka clusters.
Currently, kafka-ui exports:
* kafka topics as ODD Datasets with topic's metadata, configs, and schemas
* kafka-connect's connectors as ODD Transformers, including input & output topics and additional connector configs
Configuration properties:
| Env variable name | Yaml property | Description |
| ----------------------------- | --------------------------- | ----------------------------------------------------------------------------------------------- |
| INTEGRATION\_ODD\_URL | integration.odd.url | ODD platform instance URL. Required. |
| INTEGRATION\_ODD\_TOKEN | integration.odd.token | Collector's token generated in ODD. Required. |
| INTEGRATION\_ODD\_TOPICSREGEX | integration.odd.topicsRegex | RegEx for topic names that should be exported to ODD. Optional, all topics exported by default. |
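The same properties in YAML form (a minimal sketch; the URL, token, and regex values are placeholders):
```yaml
integration:
  odd:
    url: http://odd-platform:8080 # hypothetical ODD instance address
    token: <COLLECTOR_TOKEN>
    topicsRegex: "events-.*" # optional
```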

View file

@ -1,2 +0,0 @@
# Permissions

View file

@ -1,95 +0,0 @@
# MSK (+Serverless) Setup
This guide has been written for MSK Serverless but is applicable for MSK in general as well.
### Authentication options for Kafka-UI:
```
KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL=SASL_SSL
KAFKA_CLUSTERS_0_PROPERTIES_SASL_MECHANISM=AWS_MSK_IAM
KAFKA_CLUSTERS_0_PROPERTIES_SASL_JAAS_CONFIG='software.amazon.msk.auth.iam.IAMLoginModule required;'
KAFKA_CLUSTERS_0_PROPERTIES_SASL_CLIENT_CALLBACK_HANDLER_CLASS='software.amazon.msk.auth.iam.IAMClientCallbackHandler'
```
### Creating an instance
1. Go to the MSK page
2. Click "create cluster"
3. Choose "Custom create"
4. Choose "Serverless"
5. Choose VPC and subnets
6. Choose the default security group or use the existing one
### Creating a policy
1. Go to IAM policies
2. Click "create policy"
3. Click "JSON"
4. Paste the following policy example into the editor, and replace the example `Resource` ARNs with the ARNs of your MSK cluster, topics, and groups
```
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "VisualEditor0",
"Effect": "Allow",
"Action": [
"kafka-cluster:DescribeCluster",
"kafka-cluster:AlterCluster",
"kafka-cluster:Connect"
],
"Resource": "arn:aws:kafka:eu-central-1:297478128798:cluster/test-wizard/7b39802a-21ac-48fe-b6e8-a7baf2ae2533-s2"
},
{
"Sid": "VisualEditor1",
"Effect": "Allow",
"Action": [
"kafka-cluster:DeleteGroup",
"kafka-cluster:DescribeCluster",
"kafka-cluster:ReadData",
"kafka-cluster:DescribeTopicDynamicConfiguration",
"kafka-cluster:AlterTopicDynamicConfiguration",
"kafka-cluster:AlterGroup",
"kafka-cluster:AlterClusterDynamicConfiguration",
"kafka-cluster:AlterTopic",
"kafka-cluster:CreateTopic",
"kafka-cluster:DescribeTopic",
"kafka-cluster:AlterCluster",
"kafka-cluster:DescribeGroup",
"kafka-cluster:DescribeClusterDynamicConfiguration",
"kafka-cluster:Connect",
"kafka-cluster:DeleteTopic",
"kafka-cluster:WriteData"
],
"Resource": "arn:aws:kafka:eu-central-1:297478128798:topic/test-wizard/7b39802a-21ac-48fe-b6e8-a7baf2ae2533-s2/*"
},
{
"Sid": "VisualEditor2",
"Effect": "Allow",
"Action": [
"kafka-cluster:AlterGroup",
"kafka-cluster:DescribeGroup"
],
"Resource": "arn:aws:kafka:eu-central-1:297478128798:group/test-wizard/7b39802a-21ac-48fe-b6e8-a7baf2ae2533-s2/*"
}
]
}
```
### Attaching the policy to a user
#### Creating a role for EC2
1. Go to IAM
2. Click "Create role"
3. Choose AWS Services and EC2
4. On the next page find the policy which has been created in the previous step
### Attaching the role to the EC2 instance
1. Go to EC2
2. Choose your EC2 with Kafka-UI
3. Go to Actions -> Security -> Modify IAM role
4. Choose the IAM role from the previous step
5. Click Update IAM role

View file

@ -1,42 +0,0 @@
# Quick Start
## Quick start (demo run)
1. Ensure you have docker installed
2. Ensure your kafka cluster is available from the machine you're planning to run the app on
3. Run the following:
```
docker run -it -p 8080:8080 -e DYNAMIC_CONFIG_ENABLED=true provectuslabs/kafka-ui
```
4. Go to [http://localhost:8080/ui/clusters/create-new-cluster](http://localhost:8080/ui/clusters/create-new-cluster) and configure your first cluster by pressing the "Configure new cluster" button.
When you're done with testing, you can refer to the next articles to persist your config & deploy the app wherever you need to.
## Persistent start
Please ensure that the config file at the mounted host path (`~/kui/config.yml`) exists.
```
services:
kafka-ui:
container_name: kafka-ui
image: provectuslabs/kafka-ui:latest
ports:
- 8080:8080
environment:
DYNAMIC_CONFIG_ENABLED: true
volumes:
- ~/kui/config.yml:/etc/kafkaui/dynamic_config.yaml
```
## AWS Marketplace
To start the app quickly on AWS, please refer to **this page.**
The app in AWS Marketplace can be found via [this link](https://aws.amazon.com/marketplace/pp/prodview-ogtt5hfhzkq6a).
## Helm way
To install the app via Helm please refer to [this page](../helm-charts/quick-start.md).

View file

@ -1,53 +0,0 @@
---
description: How to Deploy Kafka UI from AWS Marketplace
---
# via AWS Marketplace
### Step 1: Go to AWS Marketplace
Go to the AWS Marketplace website and sign in to your account.
### Step 2: Find UI for Apache Kafka
Either use the search bar to find "UI for Apache Kafka" or go to [marketplace product page](https://aws.amazon.com/marketplace/pp/prodview-ogtt5hfhzkq6a).
### Step 3: Subscribe and Configure
Click "Continue to Subscribe" and accept the terms and conditions. Click "Continue to Configuration".
### Step 4: Choose the Software Version and Region
Choose your desired software version and region. Click "Continue to Launch".
### Step 5: Launch the Instance
Choose "Launch from Website" and select your desired EC2 instance type. You can choose a free tier instance or choose a larger instance depending on your needs. We recommend having at least 512 RAM for an instant.
Next, select the VPC and subnet where you want the instance to be launched. If you don't have an existing VPC or subnet, you can create one by clicking "Create New VPC" or "Create New Subnet".
Choose your security group. A security group acts as a virtual firewall that controls traffic to and from your instance. If you don't have an existing security group, you can create a new one based on the _seller settings_ by clicking "Create New Based on Seller Settings".
Give your security group a name and description. The _seller settings_ will automatically populate the inbound and outbound rules for the security group based on best practices. You can review and modify the rules if necessary.
Click "Save" to create your new security group.
Select your key pair or create a new one. A key pair is used to securely connect to your instance via SSH. If you choose to create a new key pair, give it a name and click "Create". Your private key will be downloaded to your computer, so make sure to keep it in a safe place.
Finally, click "Launch" to deploy your instance. AWS will create the instance and install the Kafka UI software for you.
### Step 6: Check EC2 Status
To check the EC2 state please click on "EC2 console".
### Step 7: Access the Kafka UI
After the instance is launched, you can check its status on the EC2 dashboard. Once it's running, you can access the Kafka UI by copying the public DNS name or IP address provided by AWS and adding after the IP address or DNS name port 8080.\
Example: `ec2-xx-xxx-x-xx.us-west-2.compute.amazonaws.com:8080`
### Step 8: Configure Kafka UI to Communicate with Brokers
If your broker is deployed in AWS, allow incoming traffic from the Kafka-UI EC2 instance by adding an ingress rule to the security group used by the broker.\
If your broker is not in AWS, make sure it can handle requests from the Kafka-UI EC2 IP address.
That's it! You've successfully deployed the Kafka UI from AWS Marketplace.

View file

@ -1,273 +0,0 @@
# RBAC (Role based access control)
## Role-based access control
In this article, we'll guide you through setting up Kafka-UI with role-based access control.
### Authentication methods
First of all, you'd need to set up authentication method(s). Refer to [this](https://github.com/provectus/kafka-ui/wiki/OAuth-Configuration) article for OAuth2 setup.
### Config placement
First of all, you have to decide whether:
1. You wish to store all roles in a separate config file
2. Or keep them within the main config file
This is how you include one more config file, illustrated with a docker-compose example:
```
services:
kafka-ui:
container_name: kafka-ui
image: provectuslabs/kafka-ui:latest
environment:
KAFKA_CLUSTERS_0_NAME: local
# other properties, omitted
SPRING_CONFIG_ADDITIONAL-LOCATION: /roles.yml
volumes:
- /tmp/roles.yml:/roles.yml
```
Alternatively, you can append the roles file contents to your main config file.
### Roles file structure
#### Clusters
In the roles file we define roles, duh. Each role has access to the defined clusters:
```
rbac:
roles:
- name: "memelords"
clusters:
- local
- dev
- staging
- prod
```
#### Subjects
A role also has a list of _subjects_, which are the entities we will use to assign roles. They are provider-dependent; in general, they can be users, groups, or some other entities (github orgs, google domains, LDAP queries, etc.) In this example we define a role `memelords` that will contain all the users within the Google domain `memelord.lol` and, additionally, a GitHub user `Haarolean`. You can combine as many subjects as you want within a role.
```
- name: "memelords"
subjects:
- provider: oauth_google
type: domain
value: "memelord.lol"
- provider: oauth_github
type: user
value: "Haarolean"
```
#### Providers
A list of supported providers and corresponding subject fetch mechanism:
* oauth\_google: `user`, `domain`
* oauth\_github: `user`, `organization`
* oauth\_cognito: `user`, `group`
* ldap: `group`
* ldap\_ad: (unsupported yet, will do in 0.8 release)
Find more detailed examples in the full example file below.
#### Permissions
The next thing which is present in your roles file is, surprisingly, permissions. They consist of:
1. Resource: can be one of `CLUSTERCONFIG`, `TOPIC`, `CONSUMER`, `SCHEMA`, `CONNECT`, `KSQL`, `ACL`.
2. Value: either a fixed string or a regular expression identifying a resource. The value is not applicable to `clusterconfig` and `ksql` resources; please do not fill it out.
3. Actions: a list of actions (the possible values depend on the resource, see the lists below) that will be applied to the certain permission. Also note, there's a special action for any of the resources called "all"; it will virtually grant all the actions within the corresponding resource. An example of enabling viewing and creating topics whose names start with "derp":
```
permissions:
- resource: topic
value: "derp.*"
actions: [ VIEW, CREATE ]
```
**Actions**
A list of all the actions for the corresponding resources (please note neither resource nor action names are case-sensitive):
* `applicationconfig`: `view`, `edit`
* `clusterconfig`: `view`, `edit`
* `topic`: `view`, `create`, `edit`, `delete`, `messages_read`, `messages_produce`, `messages_delete`
* `consumer`: `view`, `delete`, `reset_offsets`
* `schema`: `view`, `create`, `delete`, `edit`, `modify_global_compatibility`
* `connect`: `view`, `edit`, `create`
* `ksql`: `execute`
* `acl`: `view`, `edit`
## Example file
**A complete file example:**
```
rbac:
roles:
- name: "memelords"
clusters:
- local
- dev
- staging
- prod
subjects:
- provider: oauth_google
type: domain
value: "memelord.lol"
- provider: oauth_google
type: user
value: "kek@memelord.lol"
- provider: oauth_github
type: organization
value: "memelords_team"
- provider: oauth_github
type: user
value: "memelord"
- provider: oauth_cognito
type: user
value: "username"
- provider: oauth_cognito
type: group
value: "memelords"
- provider: ldap
type: group
value: "admin_staff"
- provider: ldap_ad # NOT YET SUPPORTED, SEE ISSUE 3741
type: user
value: "cn=germanosin,dc=planetexpress,dc=com"
permissions:
- resource: applicationconfig
# value not applicable for applicationconfig
actions: [ "view", "edit" ] # can be with or without quotes
- resource: clusterconfig
# value not applicable for clusterconfig
actions: [ "view", "edit" ]
- resource: topic
value: "ololo.*"
actions: # can be a multiline list
- VIEW # can be upper or lowercase
- CREATE
- EDIT
- DELETE
- MESSAGES_READ
- MESSAGES_PRODUCE
- MESSAGES_DELETE
- resource: consumer
value: "\_confluent-ksql.*"
actions: [ VIEW, DELETE, RESET_OFFSETS ]
- resource: schema
value: "blah.*"
actions: [ VIEW, CREATE, DELETE, EDIT, MODIFY_GLOBAL_COMPATIBILITY ]
- resource: connect
value: "local"
actions: [ view, edit, create ]
# connectors selector not implemented yet, use connects
# selector:
# connector:
# name: ".*"
# class: 'com.provectus.connectorName'
- resource: ksql
# value not applicable for ksql
actions: [ execute ]
- resource: acl
# value not applicable for acl
actions: [ view, edit ]
```
**A read-only setup:**
```
rbac:
roles:
- name: "readonly"
clusters:
# FILL THIS
subjects:
# FILL THIS
permissions:
- resource: clusterconfig
actions: [ "view" ]
- resource: topic
value: ".*"
actions:
- VIEW
- MESSAGES_READ
- resource: consumer
value: ".*"
actions: [ view ]
- resource: schema
value: ".*"
actions: [ view ]
- resource: connect
value: ".*"
actions: [ view ]
- resource: acl
actions: [ view ]
```
**An admin-group setup example:**
```
rbac:
roles:
- name: "admins"
clusters:
# FILL THIS
subjects:
# FILL THIS
permissions:
- resource: applicationconfig
actions: all
- resource: clusterconfig
actions: all
- resource: topic
value: ".*"
actions: all
- resource: consumer
value: ".*"
actions: all
- resource: schema
value: ".*"
actions: all
- resource: connect
value: ".*"
actions: all
- resource: ksql
actions: all
- resource: acl
actions: [ view ]
```

View file

@ -1,77 +0,0 @@
---
description: The list of supported auth mechanisms for RBAC
---
# Supported Identity Providers
### Generic OAuth
Any OAuth provider that is not in this list: Google, GitHub, Cognito.
Set up the auth itself first, docs [here](../authentication/oauth2.md) and [here](../authentication/sso-guide.md)
```yaml
subjects:
- provider: oauth
type: role
value: "role-name"
```
### Google
Set up google auth [first](../authentication/oauth2.md#google)
```yaml
- provider: oauth_google
type: domain
value: "memelord.lol"
- provider: oauth_google
type: user
value: "kek@memelord.lol"
```
### Github
Set up github auth [first](../authentication/oauth2.md#github)
```yaml
- provider: oauth_github
type: organization
value: "provectus"
- provider: oauth_github
type: user
value: "memelord"
```
### Cognito
Set up cognito auth [first](../authentication/oauth2.md#cognito)
```yaml
- provider: oauth_cognito
type: user
value: "zoidberg"
- provider: oauth_cognito
type: group
value: "memelords"
```
### LDAP
Set up LDAP auth [first](../authentication/ldap-active-directory.md)
```yaml
- provider: ldap
type: group
value: "admin_staff"
```
### Active Directory
Not yet supported, see [Issue 3741](https://github.com/provectus/kafka-ui/issues/3741)
```yaml
- provider: ldap_ad # NOT YET SUPPORTED, SEE ISSUE 3741
type: group
value: "admin_staff"
```

View file

@ -1,187 +0,0 @@
---
description: Serialization, deserialization and custom plugins
---
# Serialization / SerDe
Kafka-ui supports multiple ways to serialize/deserialize data.
#### Int32, Int64, UInt32, UInt64
Big-endian 4/8 bytes representation of signed/unsigned integers.
#### Base64
Base64 (RFC 4648) binary data representation. Can be useful when the actual data is not important, but exactly the same (byte-wise) key/value should be sent.
#### String
Treats binary data as a string in specified encoding. Default encoding is UTF-8.
Class name: `com.provectus.kafka.ui.serdes.builtin.StringSerde`
Sample configuration (if you want to overwrite default configuration):
```yaml
kafka:
clusters:
- name: Cluster1
# Other Cluster configuration omitted ...
serde:
# registering String serde with custom config
- name: AsciiString
className: com.provectus.kafka.ui.serdes.builtin.StringSerde
properties:
encoding: "ASCII"
      # overriding built-in String serde config
- name: String
properties:
encoding: "UTF-16"
```
#### ProtobufFile
Class name: `com.provectus.kafka.ui.serdes.builtin.ProtobufFileSerde`
Sample configuration:
```yaml
kafka:
clusters:
- name: Cluster1
# Other Cluster configuration omitted ...
serde:
- name: ProtobufFile
properties:
# protobufFilesDir specifies root location for proto files (will be scanned recursively)
# NOTE: if 'protobufFilesDir' specified, then 'protobufFile' and 'protobufFiles' settings will be ignored
protobufFilesDir: "/path/to/my-protobufs"
# (DEPRECATED) protobufFile is the path to the protobuf schema. (deprecated: please use "protobufFiles")
protobufFile: path/to/my.proto
# (DEPRECATED) protobufFiles is the location of one or more protobuf schemas
protobufFiles:
- /path/to/my-protobufs/my.proto
- /path/to/my-protobufs/another.proto
# default protobuf type that is used for KEY serialization/deserialization
# optional
protobufMessageNameForKey: my.Type1
# mapping of topic names to protobuf types, that will be used for KEYS serialization/deserialization
# optional
protobufMessageNameForKeyByTopic:
topic1: my.KeyType1
topic2: my.KeyType2
# default protobuf type that is used for VALUE serialization/deserialization
# optional, if not set - first type in file will be used as default
protobufMessageName: my.Type1
# mapping of topic names to protobuf types, that will be used for VALUES serialization/deserialization
# optional
protobufMessageNameByTopic:
topic1: my.Type1
"topic.2": my.Type2
```
#### SchemaRegistry
SchemaRegistry serde is automatically configured if schema registry properties are set at the cluster level. But you can add new SchemaRegistry-typed serdes that will connect to another schema-registry instance.
Class name: `com.provectus.kafka.ui.serdes.builtin.sr.SchemaRegistrySerde`
Sample configuration:
```yaml
kafka:
clusters:
- name: Cluster1
# this url will be used by "SchemaRegistry" by default
schemaRegistry: http://main-schema-registry:8081
serde:
- name: AnotherSchemaRegistry
className: com.provectus.kafka.ui.serdes.builtin.sr.SchemaRegistrySerde
properties:
url: http://another-schema-registry:8081
# auth properties, optional
username: nameForAuth
password: P@ssW0RdForAuth
# and also add another SchemaRegistry serde
- name: ThirdSchemaRegistry
className: com.provectus.kafka.ui.serdes.builtin.sr.SchemaRegistrySerde
properties:
url: http://another-yet-schema-registry:8081
```
### Setting serdes for specific topics
You can specify a preferable serde for a topic's key/value. This serde will be chosen by default in the UI on the topic's view/produce pages. To do so, set the `topicKeysPattern`/`topicValuesPattern` properties for the selected serde. Kafka-ui will choose the first serde that matches the specified pattern.
Sample configuration:
```yaml
kafka:
clusters:
- name: Cluster1
serde:
- name: String
topicKeysPattern: click-events|imp-events
- name: Int64
topicKeysPattern: ".*-events"
- name: SchemaRegistry
topicValuesPattern: click-events|imp-events
```
### Default serdes
You can specify which serde will be chosen in the UI by default if no other serde is selected via the `topicKeysPattern`/`topicValuesPattern` settings.
Sample configuration:
```yaml
kafka:
clusters:
- name: Cluster1
defaultKeySerde: Int32
defaultValueSerde: String
serde:
- name: Int32
topicKeysPattern: click-events|imp-events
```
### Fallback
If the selected serde couldn't be applied (an exception was thrown), then the fallback serde (String serde with UTF-8 encoding) will be applied. Such messages will be specially highlighted in the UI.
### Custom pluggable serde registration
You can implement your own serde and register it in kafka-ui application. To do so:
1. Add `kafka-ui-serde-api` dependency (should be downloadable via maven central)
2. Implement `com.provectus.kafka.ui.serde.api.Serde` interface. See javadoc for implementation requirements.
3. Pack your serde into an uber jar, or provide a directory with a no-dependencies jar and its dependency jars
Example pluggable serdes : [kafka-smile-serde](https://github.com/provectus/kafkaui-smile-serde), [kafka-glue-sr-serde](https://github.com/provectus/kafkaui-glue-sr-serde)
Sample configuration:
```yaml
kafka:
clusters:
- name: Cluster1
serde:
- name: MyCustomSerde
className: my.lovely.org.KafkaUiSerde
filePath: /var/lib/kui-serde/my-kui-serde.jar
- name: MyCustomSerde2
className: my.lovely.org.KafkaUiSerde2
filePath: /var/lib/kui-serde2
properties:
prop1: v1
```

View file

@ -1,10 +0,0 @@
# SSL
### Connecting to a Secure Broker
The app supports TLS (SSL) and SASL connections for [encryption and authentication](http://kafka.apache.org/090/documentation.html#security).\
#### Running From Docker-compose file
See this docker-compose file reference for ssl-enabled kafka; a minimal sketch of the relevant settings follows.
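A minimal sketch of the SSL-related configuration (the listener address, truststore path, and password are placeholders; the properties themselves are documented on the misc configuration properties page):
```yaml
services:
  kafka-ui:
    image: provectuslabs/kafka-ui:latest
    ports:
      - 8080:8080
    environment:
      KAFKA_CLUSTERS_0_NAME: local
      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:9093 # hypothetical SSL listener
      KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL: SSL
      KAFKA_CLUSTERS_0_SSL_TRUSTSTORELOCATION: /kafka.truststore.jks
      KAFKA_CLUSTERS_0_SSL_TRUSTSTOREPASSWORD: changeit # placeholder
    volumes:
      - ./kafka.truststore.jks:/kafka.truststore.jks
```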

View file

@ -1,2 +0,0 @@
# Building

View file

@ -1,146 +0,0 @@
# Prerequisites
### Prerequisites
This page explains how to get the software you need to use on Linux or macOS for local development.
* `java 17` package or newer
* `git` installed
* `docker` installed
> Note: For contribution, you must have a `github` account.
#### For Linux
1. Install `OpenJDK 17` package or newer:
```
sudo apt update
sudo apt install openjdk-17-jdk
```
* Check java version using the command `java -version`.
```
openjdk version "17.0.5" 2022-10-18
OpenJDK Runtime Environment (build 17.0.5+8-Ubuntu-2ubuntu120.04)
OpenJDK 64-Bit Server VM (build 17.0.5+8-Ubuntu-2ubuntu120.04, mixed mode, sharing)
```
Note: In case OpenJDK 17 is not set as your default Java, run `sudo update-alternatives --config java` command to list all installed Java versions.
```
Selection Path Priority Status
------------------------------------------------------------
* 0 /usr/lib/jvm/java-11-openjdk-amd64/bin/java 1111 auto mode
1 /usr/lib/jvm/java-11-openjdk-amd64/bin/java 1111 manual mode
2 /usr/lib/jvm/java-16-openjdk-amd64/bin/java 1051 manual mode
3 /usr/lib/jvm/java-17-openjdk-amd64/bin/java 1001 manual mode
Press <enter> to keep the current choice[*], or type selection number:
```
You can set it as the default by entering the selection number for it in the list and pressing Enter. For example, to set Java 17 as the default, you would enter "3" and press **Enter**.
2. Install `git`:
```
sudo apt install git
```
3. Install `docker`:
```
sudo apt update
sudo apt install -y apt-transport-https ca-certificates curl software-properties-common
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
sudo add-apt-repository -y "deb [arch=amd64] https://download.docker.com/linux/ubuntu focal stable"
apt-cache policy docker-ce
sudo apt -y install docker-ce
```
To execute the `docker` Command without `sudo`:
```
sudo usermod -aG docker ${USER}
su - ${USER}
sudo chmod 666 /var/run/docker.sock
```
#### For macOS
1. Install [brew](https://brew.sh/).
2. Ensure cask support is available (it ships with recent Homebrew versions, so no separate installation is usually needed).
3. Install Eclipse Temurin 17 via Homebrew cask:
```sh
brew tap homebrew/cask-versions
brew install temurin17
```
4. Verify Installation
```sh
java -version
```
Note: In case OpenJDK 17 is not set as your default Java, you can consider including it in your `$PATH` after installation
```sh
export PATH="$(/usr/libexec/java_home -v 17)/bin:$PATH"
export JAVA_HOME="$(/usr/libexec/java_home -v 17)"
```
### Tips
Consider allocating not less than 4GB of memory for your docker. Otherwise, some apps within a stack (e.g. `kafka-ui.yaml`) might crash.
To check how much memory is allocated to docker, use `docker info`.
You will find the total memory and used memory in the output. If you don't find used memory there, that means memory limits are not set for containers.
**To allocate 4GB of memory for Docker:**
**MacOS**
Edit docker daemon settings within docker dashboard
**For Ubuntu**
1. Open the Docker configuration file in a text editor using the following command:
```
sudo nano /etc/default/docker
```
2. Add the following line to the file to allocate 4GB of memory to Docker:
```
DOCKER_OPTS="--default-ulimit memlock=-1:-1 --memory=4g --memory-swap=-1"
```
3. Save the file and exit the text editor.
4. Restart the Docker service using the following command:
```
sudo service docker restart
```
5. Verify that the memory limit has been set correctly by running the following command:
```
docker info | grep -i memory
```
Note that the warning messages are expected as they relate to the kernel not supporting cgroup memory limits.
Now any containers you run in docker will be limited to this amount of memory. You can also increase the memory limit as per your preference.
### Where to go next
In the next section, you'll learn how to Build and Run kafka-ui.

View file

@ -1,80 +0,0 @@
# With Docker
## Build & Run
Once you installed the prerequisites and cloned the repository, run the following steps in your project directory:
### Step 1 : Build
> _**NOTE:**_ If you are a macOS M1 user, please keep in mind the following:
> Make sure you have ARM-supported Java installed
> Skip the maven tests as they might not be successful
* Build a docker image with the app
```
./mvnw clean install -Pprod
```
* If you need to build the frontend `kafka-ui-react-app`, refer to the kafka-ui-react-app build documentation
* In case you want to build `kafka-ui-api` by skipping the tests
```
./mvnw clean install -Dmaven.test.skip=true -Pprod
```
* To build only the `kafka-ui-api` you can use this command:
```
./mvnw -f kafka-ui-api/pom.xml clean install -Pprod -DskipUIBuild=true
```
If this step is successful, it should create a docker image named `provectuslabs/kafka-ui` with the `latest` tag on your local machine (except on macOS M1).
### Step 2 : Run
**Using Docker Compose**
> _**NOTE:**_ If you are a macOS M1 user, you can use the arm64-supported docker compose script `./documentation/compose/kafka-ui-arm64.yaml`
* Start the `kafka-ui` app using docker image built in step 1 along with Kafka clusters:
```
docker-compose -f ./documentation/compose/kafka-ui.yaml up -d
```
**Using Spring Boot Run**
* If you want to start only kafka clusters (to run the `kafka-ui` app via `spring-boot:run`):
```
docker-compose -f ./documentation/compose/kafka-clusters-only.yaml up -d
```
* Then start the app.
```
./mvnw spring-boot:run -Pprod
# or
./mvnw spring-boot:run -Pprod -Dspring.config.location=file:///path/to/conf.yaml
```
**Running in kubernetes**
* Using Helm Charts
```
helm repo add kafka-ui https://provectus.github.io/kafka-ui
helm install kafka-ui kafka-ui/kafka-ui
```
To read more, please refer to the chart documentation.
### Step 3 : Access Kafka-UI
* To see the `kafka-ui` app running, navigate to http://localhost:8080.

View file

@ -1,31 +0,0 @@
---
description: Build & Run Without Docker
---
# Without Docker
Once you installed the prerequisites and cloned the repository, run the following steps in your project directory:
### Running Without Docker Quickly <a href="#run_without_docker_quickly" id="run_without_docker_quickly"></a>
* [Download the latest kafka-ui jar file](https://github.com/provectus/kafka-ui/releases)
**Execute the jar**
```
java -Dspring.config.additional-location=<path-to-application-local.yml> --add-opens java.rmi/javax.rmi.ssl=ALL-UNNAMED -jar <path-to-kafka-ui-jar>
```
* Example of how to configure clusters in the [application-local.yml](https://github.com/provectus/kafka-ui/blob/master/kafka-ui-api/src/main/resources/application-local.yml) configuration file.
### Building And Running Without Docker <a href="#build_and_run_without_docker" id="build_and_run_without_docker"></a>
> _**NOTE:**_ If you want to get kafka-ui up and running locally quickly without building the jar file manually, then just follow Running Without Docker Quickly
> Comment out the `docker-maven-plugin` plugin in the `kafka-ui-api` pom.xml
* Build the jar; once your build is successful, a jar file named `kafka-ui-api-0.0.1-SNAPSHOT.jar` is generated inside `kafka-ui-api/target`.
* Execute the jar, as sketched below.
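A minimal build-and-run sketch (the build command mirrors the one from the With Docker page; adjust the config path and jar path to your checkout):
```
./mvnw clean install -Pprod
java -Dspring.config.additional-location=<path-to-application-local.yml> --add-opens java.rmi/javax.rmi.ssl=ALL-UNNAMED -jar kafka-ui-api/target/kafka-ui-api-0.0.1-SNAPSHOT.jar
```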

View file

@ -1,14 +0,0 @@
# Setting up git
Set your git credentials:
```
git config --global user.name "Mona Lisa"
git config --global user.email "monalisa@louvre.net"
```
More info on setting git credentials:
[Setting your username in Git](https://docs.github.com/en/get-started/getting-started-with-git/setting-your-username-in-git)
[Setting your commit email address](https://docs.github.com/en/account-and-profile/setting-up-and-managing-your-personal-account-on-github/managing-email-preferences/setting-your-commit-email-address)

View file

@ -1,3 +0,0 @@
# WIP: Testing
TODO :)

View file

@ -0,0 +1,16 @@
# Descriptions of docker-compose configurations (*.yaml)
1. [kafka-ui.yaml](./kafka-ui.yaml) - Default configuration with 2 kafka clusters with two nodes of Schema Registry, one kafka-connect and a few dummy topics.
2. [kafka-ui-arm64.yaml](./kafka-ui-arm64.yaml) - Default configuration for ARM64(Mac M1) architecture with 1 kafka cluster without zookeeper with one node of Schema Registry, one kafka-connect and a few dummy topics.
3. [kafka-clusters-only.yaml](./kafka-clusters-only.yaml) - A configuration for development purposes, everything besides `kafka-ui` itself (to be run locally).
4. [kafka-ui-ssl.yml](./kafka-ssl.yml) - Connect to Kafka via TLS/SSL
5. [kafka-cluster-sr-auth.yaml](./kafka-cluster-sr-auth.yaml) - Schema registry with authentication.
6. [kafka-ui-auth-context.yaml](./kafka-ui-auth-context.yaml) - Basic (username/password) authentication with custom path (URL) (issue 861).
7. [e2e-tests.yaml](./e2e-tests.yaml) - Configuration with different connectors (github-source, s3, sink-activities, source-activities) and Ksql functionality.
8. [kafka-ui-jmx-secured.yml](./kafka-ui-jmx-secured.yml) - Kafka's JMX with SSL and authentication.
9. [kafka-ui-reverse-proxy.yaml](./nginx-proxy.yaml) - An example for using the app behind a proxy (like nginx).
10. [kafka-ui-sasl.yaml](./kafka-ui-sasl.yaml) - SASL auth for Kafka.
11. [kafka-ui-traefik-proxy.yaml](./traefik-proxy.yaml) - Traefik specific proxy configuration.
12. [oauth-cognito.yaml](./oauth-cognito.yaml) - OAuth2 with Cognito
13. [kafka-ui-with-jmx-exporter.yaml](./kafka-ui-with-jmx-exporter.yaml) - A configuration with 2 kafka clusters with enabled prometheus jmx exporters instead of jmx.
14. [kafka-with-zookeeper.yaml](./kafka-with-zookeeper.yaml) - An example for using kafka with zookeeper

View file

@ -0,0 +1,20 @@
{
"name": "github-source",
"config":
{
"connector.class": "io.confluent.connect.github.GithubSourceConnector",
"confluent.topic.bootstrap.servers": "kafka0:29092, kafka1:29092",
"confluent.topic.replication.factor": "1",
"tasks.max": "1",
"github.service.url": "https://api.github.com",
"github.access.token": "",
"github.repositories": "provectus/kafka-ui",
"github.resources": "issues,commits,pull_requests",
"github.since": "2019-01-01",
"topic.name.pattern": "github-${resourceName}",
"key.converter": "org.apache.kafka.connect.json.JsonConverter",
"key.converter.schema.registry.url": "http://schemaregistry0:8085",
"value.converter": "org.apache.kafka.connect.json.JsonConverter",
"value.converter.schema.registry.url": "http://schemaregistry0:8085"
}
}

View file

@ -0,0 +1,18 @@
{
"name": "s3-sink",
"config":
{
"connector.class": "io.confluent.connect.s3.S3SinkConnector",
"topics": "github-issues, github-pull_requests, github-commits",
"tasks.max": "1",
"s3.region": "eu-central-1",
"s3.bucket.name": "kafka-ui-s3-sink-connector",
"s3.part.size": "5242880",
"flush.size": "3",
"storage.class": "io.confluent.connect.s3.storage.S3Storage",
"format.class": "io.confluent.connect.s3.format.json.JsonFormat",
"schema.generator.class": "io.confluent.connect.storage.hive.schema.DefaultSchemaGenerator",
"partitioner.class": "io.confluent.connect.storage.partitioner.DefaultPartitioner",
"schema.compatibility": "BACKWARD"
}
}

View file

@ -0,0 +1,19 @@
{
"name": "sink_postgres_activities",
"config": {
"connector.class": "io.confluent.connect.jdbc.JdbcSinkConnector",
"connection.url": "jdbc:postgresql://postgres-db:5432/test",
"connection.user": "dev_user",
"connection.password": "12345",
"topics": "source-activities",
"table.name.format": "sink_activities",
"key.converter": "org.apache.kafka.connect.storage.StringConverter",
"key.converter.schema.registry.url": "http://schemaregistry0:8085",
"value.converter": "io.confluent.connect.avro.AvroConverter",
"value.converter.schema.registry.url": "http://schemaregistry0:8085",
"auto.create": "true",
"pk.mode": "record_value",
"pk.fields": "id",
"insert.mode": "upsert"
}
}

View file

@ -0,0 +1,20 @@
{
"name": "source_postgres_activities",
"config": {
"connector.class": "io.confluent.connect.jdbc.JdbcSourceConnector",
"connection.url": "jdbc:postgresql://postgres-db:5432/test",
"connection.user": "dev_user",
"connection.password": "12345",
"topic.prefix": "source-",
"poll.interval.ms": 3600000,
"table.whitelist": "public.activities",
"mode": "bulk",
"transforms": "extractkey",
"transforms.extractkey.type": "org.apache.kafka.connect.transforms.ExtractField$Key",
"transforms.extractkey.field": "id",
"key.converter": "org.apache.kafka.connect.storage.StringConverter",
"key.converter.schema.registry.url": "http://schemaregistry0:8085",
"value.converter": "io.confluent.connect.avro.AvroConverter",
"value.converter.schema.registry.url": "http://schemaregistry0:8085"
}
}

View file

@ -0,0 +1,9 @@
#! /bin/bash
# Wait until the Kafka Connect REST API responds with HTTP 200
while [[ "$(curl -s -o /dev/null -w '%{http_code}' kafka-connect0:8083)" != "200" ]]
do sleep 5
done
echo -e "\n--------------Creating connectors..."
# Register every connector definition found in /connectors
for filename in /connectors/*.json; do
  curl -X POST -H "Content-Type: application/json" -d @"$filename" http://kafka-connect0:8083/connectors
done

View file

@ -0,0 +1 @@
{}

View file

@ -0,0 +1,9 @@
server {
listen 80;
server_name localhost;
location /kafka-ui {
# rewrite /kafka-ui/(.*) /$1 break;
proxy_pass http://kafka-ui:8080;
}
}

View file

@ -0,0 +1,190 @@
---
version: '3.5'
services:
kafka-ui:
container_name: kafka-ui
image: provectuslabs/kafka-ui:latest
ports:
- 8080:8080
healthcheck:
test: wget --no-verbose --tries=1 --spider http://localhost:8080/actuator/health
interval: 30s
timeout: 10s
retries: 10
depends_on:
kafka0:
condition: service_healthy
schemaregistry0:
condition: service_healthy
kafka-connect0:
condition: service_healthy
environment:
KAFKA_CLUSTERS_0_NAME: local
KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
KAFKA_CLUSTERS_0_METRICS_PORT: 9997
KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry0:8085
KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first
KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
KAFKA_CLUSTERS_0_KSQLDBSERVER: http://ksqldb:8088
kafka0:
image: confluentinc/cp-kafka:7.2.1
hostname: kafka0
container_name: kafka0
healthcheck:
test: unset JMX_PORT && KAFKA_JMX_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9999" && kafka-broker-api-versions --bootstrap-server=localhost:9092
interval: 30s
timeout: 10s
retries: 10
ports:
- "9092:9092"
- "9997:9997"
environment:
KAFKA_BROKER_ID: 1
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT'
KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092'
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
KAFKA_JMX_PORT: 9997
KAFKA_JMX_HOSTNAME: localhost
KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9997
KAFKA_PROCESS_ROLES: 'broker,controller'
KAFKA_NODE_ID: 1
KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka0:29093'
KAFKA_LISTENERS: 'PLAINTEXT://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092'
KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT'
KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
volumes:
- ./scripts/update_run.sh:/tmp/update_run.sh
command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"
schemaregistry0:
image: confluentinc/cp-schema-registry:7.2.1
ports:
- 8085:8085
depends_on:
kafka0:
condition: service_healthy
healthcheck:
test: [ "CMD", "timeout", "1", "curl", "--silent", "--fail", "http://schemaregistry0:8085/subjects" ]
interval: 30s
timeout: 10s
retries: 10
environment:
SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
SCHEMA_REGISTRY_HOST_NAME: schemaregistry0
SCHEMA_REGISTRY_LISTENERS: http://schemaregistry0:8085
SCHEMA_REGISTRY_SCHEMA_REGISTRY_INTER_INSTANCE_PROTOCOL: "http"
SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: INFO
SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
kafka-connect0:
build:
context: ./kafka-connect
args:
image: confluentinc/cp-kafka-connect:6.0.1
ports:
- 8083:8083
depends_on:
kafka0:
condition: service_healthy
schemaregistry0:
condition: service_healthy
healthcheck:
test: [ "CMD", "nc", "127.0.0.1", "8083" ]
interval: 30s
timeout: 10s
retries: 10
environment:
CONNECT_BOOTSTRAP_SERVERS: kafka0:29092
CONNECT_GROUP_ID: compose-connect-group
CONNECT_CONFIG_STORAGE_TOPIC: _connect_configs
CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: 1
CONNECT_OFFSET_STORAGE_TOPIC: _connect_offset
CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: 1
CONNECT_STATUS_STORAGE_TOPIC: _connect_status
CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: 1
CONNECT_KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter
CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: http://schemaregistry0:8085
CONNECT_VALUE_CONVERTER: org.apache.kafka.connect.storage.StringConverter
CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: http://schemaregistry0:8085
CONNECT_INTERNAL_KEY_CONVERTER: org.apache.kafka.connect.json.JsonConverter
CONNECT_INTERNAL_VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter
CONNECT_REST_ADVERTISED_HOST_NAME: kafka-connect0
CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components"
# AWS_ACCESS_KEY_ID: ""
# AWS_SECRET_ACCESS_KEY: ""
kafka-init-topics:
image: confluentinc/cp-kafka:7.2.1
volumes:
- ./data/message.json:/data/message.json
depends_on:
kafka0:
condition: service_healthy
command: "bash -c 'echo Waiting for Kafka to be ready... && \
cub kafka-ready -b kafka0:29092 1 30 && \
kafka-topics --create --topic users --partitions 3 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
kafka-topics --create --topic messages --partitions 2 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
kafka-console-producer --bootstrap-server kafka0:29092 --topic users < /data/message.json'"
postgres-db:
build:
context: ./postgres
args:
image: postgres:9.6.22
ports:
- 5432:5432
healthcheck:
test: [ "CMD-SHELL", "pg_isready -U dev_user" ]
interval: 10s
timeout: 5s
retries: 5
environment:
POSTGRES_USER: 'dev_user'
POSTGRES_PASSWORD: '12345'
create-connectors:
image: ellerbrock/alpine-bash-curl-ssl
depends_on:
postgres-db:
condition: service_healthy
kafka-connect0:
condition: service_healthy
volumes:
- ./connectors:/connectors
command: bash -c '/connectors/start.sh'
ksqldb:
image: confluentinc/ksqldb-server:0.18.0
healthcheck:
test: [ "CMD", "timeout", "1", "curl", "--silent", "--fail", "http://localhost:8088/info" ]
interval: 30s
timeout: 10s
retries: 10
depends_on:
kafka0:
condition: service_healthy
kafka-connect0:
condition: service_healthy
schemaregistry0:
condition: service_healthy
ports:
- 8088:8088
environment:
KSQL_CUB_KAFKA_TIMEOUT: 120
KSQL_LISTENERS: http://0.0.0.0:8088
KSQL_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
KSQL_KSQL_LOGGING_PROCESSING_STREAM_AUTO_CREATE: "true"
KSQL_KSQL_LOGGING_PROCESSING_TOPIC_AUTO_CREATE: "true"
KSQL_KSQL_CONNECT_URL: http://kafka-connect0:8083
KSQL_KSQL_SCHEMA_REGISTRY_URL: http://schemaregistry0:8085
KSQL_KSQL_SERVICE_ID: my_ksql_1
KSQL_KSQL_HIDDEN_TOPICS: '^_.*'
KSQL_CACHE_MAX_BYTES_BUFFERING: 0
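A typical bring-up for this stack, as a sketch (the healthchecks above gate the dependent services, so the first start can take a couple of minutes):

docker compose up -d
docker compose logs -f kafka-ui   # wait for the actuator healthcheck to pass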

View file

@@ -0,0 +1,3 @@
sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username="admin" password="admin-secret";
security.protocol=SASL_PLAINTEXT
sasl.mechanism=PLAIN
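These client properties pair with the KafkaServer JAAS section further down (admin/admin-secret). A sketch of using them with the console consumer, assuming the file is saved as client.properties and the SASL listener is reachable on localhost:9092 (both are assumptions):

kafka-console-consumer --bootstrap-server localhost:9092 --topic users \
  --from-beginning --consumer.config client.properties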

View file

@@ -0,0 +1,4 @@
KafkaConnect {
org.apache.kafka.connect.rest.basic.auth.extension.PropertyFileLoginModule required
file="/conf/kafka_connect.password";
};

View file

@@ -0,0 +1 @@
admin: admin-secret
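This password file backs the PropertyFileLoginModule above, enabling HTTP basic auth on the Connect REST API. Requests then need credentials, for example:

# admin/admin-secret as defined in the password file above.
curl -u admin:admin-secret http://kafka-connect0:8083/connectors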

View file

@@ -0,0 +1,18 @@
KafkaServer {
org.apache.kafka.common.security.plain.PlainLoginModule required
username="admin"
password="admin-secret"
user_admin="admin-secret"
user_enzo="cisternino";
};
KafkaClient {
org.apache.kafka.common.security.plain.PlainLoginModule required
user_admin="admin-secret";
};
Client {
org.apache.zookeeper.server.auth.DigestLoginModule required
username="zkuser"
password="zkuserpassword";
};
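The broker picks this file up through the standard JAAS system property; a sketch of wiring it in via the container environment (the mount path is an assumption):

# e.g. in the broker's environment:
KAFKA_OPTS="-Djava.security.auth.login.config=/etc/kafka/jaas/kafka_server.conf"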

View file

@@ -0,0 +1,5 @@
SchemaRegistryProps {
org.eclipse.jetty.jaas.spi.PropertyFileLoginModule required
file="/conf/schema_registry.password"
debug="false";
};

View file

@@ -0,0 +1 @@
admin: OBF:1w8t1tvf1w261w8v1w1c1tvn1w8x,admin
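The OBF: value is a Jetty-obfuscated (not encrypted) password for the PropertyFileLoginModule above. It can be regenerated with Jetty's Password tool; a sketch, where the jetty-util jar path is an assumption:

java -cp jetty-util.jar org.eclipse.jetty.util.security.Password admin admin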

View file

@@ -0,0 +1,4 @@
Server {
org.apache.zookeeper.server.auth.DigestLoginModule required
user_zkuser="zkuserpassword";
};

View file

@@ -0,0 +1,2 @@
rules:
- pattern: ".*"
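This catch-all pattern tells the Prometheus JMX exporter to expose every MBean unfiltered. The rules file is consumed by the javaagent downloaded in the entrypoint below; attaching it looks roughly like this (the port and file paths are assumptions):

KAFKA_OPTS="-javaagent:/usr/share/jmx_exporter/jmx_prometheus_javaagent.jar=11001:/jmx_exporter/rules.yml"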

View file

@@ -0,0 +1,10 @@
#!/usr/bin/env bash
# Download the Prometheus JMX exporter javaagent once, then hand off to the stock entrypoint.
JAVA_AGENT_FILE="/usr/share/jmx_exporter/jmx_prometheus_javaagent.jar"
if [ ! -f "$JAVA_AGENT_FILE" ]; then
  echo "Downloading jmx_exporter javaagent"
  curl -o "$JAVA_AGENT_FILE" https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/0.16.1/jmx_prometheus_javaagent-0.16.1.jar
fi
exec /etc/confluent/docker/run

Binary file not shown.

Some files were not shown because too many files have changed in this diff.