Compare commits

1 commit

Author: Roman Zabaluev
SHA1: c91f4e461b
Message: test newer spring boot and jdk17
Date: 2022-05-09 05:15:17 +04:00
1341 changed files with 59447 additions and 71253 deletions

@@ -1,36 +0,0 @@
{
"name": "Java",
"image": "mcr.microsoft.com/devcontainers/java:0-17",
"features": {
"ghcr.io/devcontainers/features/java:1": {
"version": "none",
"installMaven": "true",
"installGradle": "false"
},
"ghcr.io/devcontainers/features/docker-in-docker:2": {}
},
// Use 'forwardPorts' to make a list of ports inside the container available locally.
// "forwardPorts": [],
// Use 'postCreateCommand' to run commands after the container is created.
// "postCreateCommand": "java -version",
"customizations": {
"vscode": {
"extensions" : [
"vscjava.vscode-java-pack",
"vscjava.vscode-maven",
"vscjava.vscode-java-debug",
"EditorConfig.EditorConfig",
"ms-azuretools.vscode-docker",
"antfu.vite",
"ms-kubernetes-tools.vscode-kubernetes-tools",
"github.vscode-pull-request-github"
]
}
}
}

@@ -1,286 +0,0 @@
[*]
charset = utf-8
end_of_line = lf
indent_size = 4
indent_style = space
insert_final_newline = true
max_line_length = 120
tab_width = 4
ij_continuation_indent_size = 8
ij_formatter_off_tag = @formatter:off
ij_formatter_on_tag = @formatter:on
ij_formatter_tags_enabled = true
ij_smart_tabs = false
ij_visual_guides = none
ij_wrap_on_typing = false
trim_trailing_whitespace = true
[*.java]
indent_size = 2
ij_continuation_indent_size = 4
ij_java_align_consecutive_assignments = false
ij_java_align_consecutive_variable_declarations = false
ij_java_align_group_field_declarations = false
ij_java_align_multiline_annotation_parameters = false
ij_java_align_multiline_array_initializer_expression = false
ij_java_align_multiline_assignment = false
ij_java_align_multiline_binary_operation = false
ij_java_align_multiline_chained_methods = false
ij_java_align_multiline_extends_list = false
ij_java_align_multiline_for = true
ij_java_align_multiline_method_parentheses = false
ij_java_align_multiline_parameters = true
ij_java_align_multiline_parameters_in_calls = false
ij_java_align_multiline_parenthesized_expression = false
ij_java_align_multiline_records = true
ij_java_align_multiline_resources = true
ij_java_align_multiline_ternary_operation = false
ij_java_align_multiline_text_blocks = false
ij_java_align_multiline_throws_list = false
ij_java_align_subsequent_simple_methods = false
ij_java_align_throws_keyword = false
ij_java_align_types_in_multi_catch = true
ij_java_annotation_parameter_wrap = off
ij_java_array_initializer_new_line_after_left_brace = false
ij_java_array_initializer_right_brace_on_new_line = false
ij_java_array_initializer_wrap = normal
ij_java_assert_statement_colon_on_next_line = false
ij_java_assert_statement_wrap = normal
ij_java_assignment_wrap = normal
ij_java_binary_operation_sign_on_next_line = false
ij_java_binary_operation_wrap = normal
ij_java_blank_lines_after_anonymous_class_header = 0
ij_java_blank_lines_after_class_header = 0
ij_java_blank_lines_after_imports = 1
ij_java_blank_lines_after_package = 1
ij_java_blank_lines_around_class = 1
ij_java_blank_lines_around_field = 0
ij_java_blank_lines_around_field_in_interface = 0
ij_java_blank_lines_around_initializer = 1
ij_java_blank_lines_around_method = 1
ij_java_blank_lines_around_method_in_interface = 1
ij_java_blank_lines_before_class_end = 0
ij_java_blank_lines_before_imports = 1
ij_java_blank_lines_before_method_body = 0
ij_java_blank_lines_before_package = 1
ij_java_block_brace_style = end_of_line
ij_java_block_comment_add_space = false
ij_java_block_comment_at_first_column = true
ij_java_builder_methods = none
ij_java_call_parameters_new_line_after_left_paren = false
ij_java_call_parameters_right_paren_on_new_line = false
ij_java_call_parameters_wrap = normal
ij_java_case_statement_on_separate_line = true
ij_java_catch_on_new_line = false
ij_java_class_annotation_wrap = split_into_lines
ij_java_class_brace_style = end_of_line
ij_java_class_count_to_use_import_on_demand = 999
ij_java_class_names_in_javadoc = 1
ij_java_do_not_indent_top_level_class_members = false
ij_java_do_not_wrap_after_single_annotation = false
ij_java_do_not_wrap_after_single_annotation_in_parameter = false
ij_java_do_while_brace_force = always
ij_java_doc_add_blank_line_after_description = true
ij_java_doc_add_blank_line_after_param_comments = false
ij_java_doc_add_blank_line_after_return = false
ij_java_doc_add_p_tag_on_empty_lines = true
ij_java_doc_align_exception_comments = true
ij_java_doc_align_param_comments = true
ij_java_doc_do_not_wrap_if_one_line = false
ij_java_doc_enable_formatting = true
ij_java_doc_enable_leading_asterisks = true
ij_java_doc_indent_on_continuation = false
ij_java_doc_keep_empty_lines = true
ij_java_doc_keep_empty_parameter_tag = true
ij_java_doc_keep_empty_return_tag = true
ij_java_doc_keep_empty_throws_tag = true
ij_java_doc_keep_invalid_tags = true
ij_java_doc_param_description_on_new_line = false
ij_java_doc_preserve_line_breaks = false
ij_java_doc_use_throws_not_exception_tag = true
ij_java_else_on_new_line = false
ij_java_entity_dd_suffix = EJB
ij_java_entity_eb_suffix = Bean
ij_java_entity_hi_suffix = Home
ij_java_entity_lhi_prefix = Local
ij_java_entity_lhi_suffix = Home
ij_java_entity_li_prefix = Local
ij_java_entity_pk_class = java.lang.String
ij_java_entity_vo_suffix = VO
ij_java_enum_constants_wrap = normal
ij_java_extends_keyword_wrap = normal
ij_java_extends_list_wrap = normal
ij_java_field_annotation_wrap = split_into_lines
ij_java_finally_on_new_line = false
ij_java_for_brace_force = always
ij_java_for_statement_new_line_after_left_paren = false
ij_java_for_statement_right_paren_on_new_line = false
ij_java_for_statement_wrap = normal
ij_java_generate_final_locals = false
ij_java_generate_final_parameters = false
ij_java_if_brace_force = always
ij_java_imports_layout = $*,|,*
ij_java_indent_case_from_switch = true
ij_java_insert_inner_class_imports = false
ij_java_insert_override_annotation = true
ij_java_keep_blank_lines_before_right_brace = 2
ij_java_keep_blank_lines_between_package_declaration_and_header = 2
ij_java_keep_blank_lines_in_code = 2
ij_java_keep_blank_lines_in_declarations = 2
ij_java_keep_builder_methods_indents = false
ij_java_keep_control_statement_in_one_line = true
ij_java_keep_first_column_comment = true
ij_java_keep_indents_on_empty_lines = false
ij_java_keep_line_breaks = true
ij_java_keep_multiple_expressions_in_one_line = false
ij_java_keep_simple_blocks_in_one_line = false
ij_java_keep_simple_classes_in_one_line = false
ij_java_keep_simple_lambdas_in_one_line = false
ij_java_keep_simple_methods_in_one_line = false
ij_java_label_indent_absolute = false
ij_java_label_indent_size = 0
ij_java_lambda_brace_style = end_of_line
ij_java_layout_static_imports_separately = true
ij_java_line_comment_add_space = false
ij_java_line_comment_add_space_on_reformat = false
ij_java_line_comment_at_first_column = true
ij_java_message_dd_suffix = EJB
ij_java_message_eb_suffix = Bean
ij_java_method_annotation_wrap = split_into_lines
ij_java_method_brace_style = end_of_line
ij_java_method_call_chain_wrap = normal
ij_java_method_parameters_new_line_after_left_paren = false
ij_java_method_parameters_right_paren_on_new_line = false
ij_java_method_parameters_wrap = normal
ij_java_modifier_list_wrap = false
ij_java_multi_catch_types_wrap = normal
ij_java_names_count_to_use_import_on_demand = 999
ij_java_new_line_after_lparen_in_annotation = false
ij_java_new_line_after_lparen_in_record_header = false
ij_java_parameter_annotation_wrap = normal
ij_java_parentheses_expression_new_line_after_left_paren = false
ij_java_parentheses_expression_right_paren_on_new_line = false
ij_java_place_assignment_sign_on_next_line = false
ij_java_prefer_longer_names = true
ij_java_prefer_parameters_wrap = false
ij_java_record_components_wrap = normal
ij_java_repeat_synchronized = true
ij_java_replace_instanceof_and_cast = false
ij_java_replace_null_check = true
ij_java_replace_sum_lambda_with_method_ref = true
ij_java_resource_list_new_line_after_left_paren = false
ij_java_resource_list_right_paren_on_new_line = false
ij_java_resource_list_wrap = normal
ij_java_rparen_on_new_line_in_annotation = false
ij_java_rparen_on_new_line_in_record_header = false
ij_java_session_dd_suffix = EJB
ij_java_session_eb_suffix = Bean
ij_java_session_hi_suffix = Home
ij_java_session_lhi_prefix = Local
ij_java_session_lhi_suffix = Home
ij_java_session_li_prefix = Local
ij_java_session_si_suffix = Service
ij_java_space_after_closing_angle_bracket_in_type_argument = false
ij_java_space_after_colon = true
ij_java_space_after_comma = true
ij_java_space_after_comma_in_type_arguments = true
ij_java_space_after_for_semicolon = true
ij_java_space_after_quest = true
ij_java_space_after_type_cast = true
ij_java_space_before_annotation_array_initializer_left_brace = false
ij_java_space_before_annotation_parameter_list = false
ij_java_space_before_array_initializer_left_brace = true
ij_java_space_before_catch_keyword = true
ij_java_space_before_catch_left_brace = true
ij_java_space_before_catch_parentheses = true
ij_java_space_before_class_left_brace = true
ij_java_space_before_colon = true
ij_java_space_before_colon_in_foreach = true
ij_java_space_before_comma = false
ij_java_space_before_do_left_brace = true
ij_java_space_before_else_keyword = true
ij_java_space_before_else_left_brace = true
ij_java_space_before_finally_keyword = true
ij_java_space_before_finally_left_brace = true
ij_java_space_before_for_left_brace = true
ij_java_space_before_for_parentheses = true
ij_java_space_before_for_semicolon = false
ij_java_space_before_if_left_brace = true
ij_java_space_before_if_parentheses = true
ij_java_space_before_method_call_parentheses = false
ij_java_space_before_method_left_brace = true
ij_java_space_before_method_parentheses = false
ij_java_space_before_opening_angle_bracket_in_type_parameter = false
ij_java_space_before_quest = true
ij_java_space_before_switch_left_brace = true
ij_java_space_before_switch_parentheses = true
ij_java_space_before_synchronized_left_brace = true
ij_java_space_before_synchronized_parentheses = true
ij_java_space_before_try_left_brace = true
ij_java_space_before_try_parentheses = true
ij_java_space_before_type_parameter_list = false
ij_java_space_before_while_keyword = true
ij_java_space_before_while_left_brace = true
ij_java_space_before_while_parentheses = true
ij_java_space_inside_one_line_enum_braces = false
ij_java_space_within_empty_array_initializer_braces = false
ij_java_space_within_empty_method_call_parentheses = false
ij_java_space_within_empty_method_parentheses = false
ij_java_spaces_around_additive_operators = true
ij_java_spaces_around_annotation_eq = true
ij_java_spaces_around_assignment_operators = true
ij_java_spaces_around_bitwise_operators = true
ij_java_spaces_around_equality_operators = true
ij_java_spaces_around_lambda_arrow = true
ij_java_spaces_around_logical_operators = true
ij_java_spaces_around_method_ref_dbl_colon = false
ij_java_spaces_around_multiplicative_operators = true
ij_java_spaces_around_relational_operators = true
ij_java_spaces_around_shift_operators = true
ij_java_spaces_around_type_bounds_in_type_parameters = true
ij_java_spaces_around_unary_operator = false
ij_java_spaces_within_angle_brackets = false
ij_java_spaces_within_annotation_parentheses = false
ij_java_spaces_within_array_initializer_braces = false
ij_java_spaces_within_braces = false
ij_java_spaces_within_brackets = false
ij_java_spaces_within_cast_parentheses = false
ij_java_spaces_within_catch_parentheses = false
ij_java_spaces_within_for_parentheses = false
ij_java_spaces_within_if_parentheses = false
ij_java_spaces_within_method_call_parentheses = false
ij_java_spaces_within_method_parentheses = false
ij_java_spaces_within_parentheses = false
ij_java_spaces_within_record_header = false
ij_java_spaces_within_switch_parentheses = false
ij_java_spaces_within_synchronized_parentheses = false
ij_java_spaces_within_try_parentheses = false
ij_java_spaces_within_while_parentheses = false
ij_java_special_else_if_treatment = true
ij_java_subclass_name_suffix = Impl
ij_java_ternary_operation_signs_on_next_line = false
ij_java_ternary_operation_wrap = normal
ij_java_test_name_suffix = Test
ij_java_throws_keyword_wrap = normal
ij_java_throws_list_wrap = normal
ij_java_use_external_annotations = false
ij_java_use_fq_class_names = false
ij_java_use_relative_indents = false
ij_java_use_single_class_imports = true
ij_java_variable_annotation_wrap = normal
ij_java_visibility = public
ij_java_while_brace_force = always
ij_java_while_on_new_line = false
ij_java_wrap_comments = false
ij_java_wrap_first_method_in_call_chain = false
ij_java_wrap_long_lines = false
[*.md]
insert_final_newline = false
trim_trailing_whitespace = false
[*.yaml]
indent_size = 2
[*.yml]
indent_size = 2
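
For readers unfamiliar with EditorConfig: sections apply in file order, and a more specific glob later in the file overrides the generic one, which is how this file gives Java and YAML sources narrower indents than the global default. A minimal sketch of that cascade, with values copied from the file above:

[*]
# global default for every file type
indent_size = 4
[*.java]
# the more specific section wins for Java sources
indent_size = 2
[*.yml]
indent_size = 2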

.github/CODEOWNERS

@@ -14,5 +14,5 @@
# TESTS
/kafka-ui-e2e-checks/ @provectus/kafka-qa
# INFRA
/.github/workflows/ @provectus/kafka-devops
# HELM CHARTS
/charts/ @provectus/kafka-devops
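
For context on the file touched here: CODEOWNERS maps path patterns to owning teams, and the last matching pattern wins. A minimal sketch of that precedence; the second rule below is hypothetical, added only to illustrate the override:

# the last matching line takes ownership of the files it matches
/kafka-ui-e2e-checks/ @provectus/kafka-qa
# hypothetical narrower rule that would override the one above for its subtree
/kafka-ui-e2e-checks/docker/ @provectus/kafka-devops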

@@ -1,92 +0,0 @@
name: "\U0001F41E Bug report"
description: File a bug report
labels: ["status/triage", "type/bug"]
assignees: []
body:
- type: markdown
attributes:
value: |
Hi, thanks for raising the issue(-s), all contributions really matter!
Please, note that we'll close the issue without further explanation if you don't follow
this template and don't provide the information requested within this template.
- type: checkboxes
id: terms
attributes:
label: Issue submitter TODO list
description: By you checking these checkboxes we can be sure you've done the essential things.
options:
- label: I've looked up my issue in [FAQ](https://docs.kafka-ui.provectus.io/faq/common-problems)
required: true
- label: I've searched for an already existing issues [here](https://github.com/provectus/kafka-ui/issues)
required: true
- label: I've tried running `master`-labeled docker image and the issue still persists there
required: true
- label: I'm running a supported version of the application which is listed [here](https://github.com/provectus/kafka-ui/blob/master/SECURITY.md)
required: true
- type: textarea
attributes:
label: Describe the bug (actual behavior)
description: A clear and concise description of what the bug is. Use a list, if there is more than one problem
validations:
required: true
- type: textarea
attributes:
label: Expected behavior
description: A clear and concise description of what you expected to happen
validations:
required: false
- type: textarea
attributes:
label: Your installation details
description: |
How do you run the app? Please provide as much info as possible:
1. App version (commit hash in the top left corner of the UI)
2. Helm chart version, if you use one
3. Your application config. Please remove the sensitive info like passwords or API keys.
4. Any IAAC configs
validations:
required: true
- type: textarea
attributes:
label: Steps to reproduce
description: |
Please write down the order of the actions required to reproduce the issue.
For the advanced setups/complicated issue, we might need you to provide
a minimal [reproducible example](https://stackoverflow.com/help/minimal-reproducible-example).
validations:
required: true
- type: textarea
attributes:
label: Screenshots
description: |
If applicable, add screenshots to help explain your problem
validations:
required: false
- type: textarea
attributes:
label: Logs
description: |
If applicable, *upload* screenshots to help explain your problem
validations:
required: false
- type: textarea
attributes:
label: Additional context
description: |
Add any other context about the problem here. E.G.:
1. Are there any alternative scenarios (different data/methods/configuration/setup) you have tried?
Were they successful or the same issue occurred? Please provide steps as well.
2. Related issues (if there are any).
3. Logs (if available)
4. Is there any serious impact or behaviour on the end-user because of this issue, that can be overlooked?
validations:
required: false
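
The deleted template above is a GitHub issue form; submission is gated by the `required` markers, so a required checkbox must be ticked and a required textarea filled in before the issue can be created. A minimal sketch of one such gate, using the same shape as the form above:

- type: checkboxes
  id: terms
  attributes:
    label: Issue submitter TODO list
    options:
      - label: I've searched for an already existing issues
        required: true # the form cannot be submitted until this box is checked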

.github/ISSUE_TEMPLATE/bug_report.md

@@ -0,0 +1,40 @@
---
name: "\U0001F41E Bug report"
about: Create a bug report
title: ''
labels: status/triage, type/bug
assignees: ''
---
**Describe the bug**
<!--(A clear and concise description of what the bug is.)-->
**Set up**
<!--
(How do you run the app?
Which version of the app are you running? Provide either docker image version or check commit hash at the top left corner. We won't be able to help you without this information.)
-->
**Steps to Reproduce**
Steps to reproduce the behavior:
1.
**Expected behavior**
<!--
(A clear and concise description of what you expected to happen)
-->
**Screenshots**
<!--
(If applicable, add screenshots to help explain your problem)
-->
**Additional context**
<!--
(Add any other context about the problem here)
-->

@@ -1,14 +0,0 @@
blank_issues_enabled: false
contact_links:
- name: Report helm issue
url: https://github.com/provectus/kafka-ui-charts
about: Our helm charts are located in another repo. Please raise issues/PRs regarding charts in that repo.
- name: Official documentation
url: https://docs.kafka-ui.provectus.io/
about: Before reaching out for support, please refer to our documentation. Read "FAQ" and "Common problems", also try using search there.
- name: Community Discord
url: https://discord.gg/4DWzD7pGE5
about: Chat with other users, get some support or ask questions.
- name: GitHub Discussions
url: https://github.com/provectus/kafka-ui/discussions
about: An alternative place to ask questions or to get some support.

@@ -1,66 +0,0 @@
name: "\U0001F680 Feature request"
description: Propose a new feature
labels: ["status/triage", "type/feature"]
assignees: []
body:
- type: markdown
attributes:
value: |
Hi, thanks for raising the issue(-s), all contributions really matter!
Please, note that we'll close the issue without further explanation if you don't follow
this template and don't provide the information requested within this template.
- type: checkboxes
id: terms
attributes:
label: Issue submitter TODO list
description: By you checking these checkboxes we can be sure you've done the essential things.
options:
- label: I've searched for an already existing issues [here](https://github.com/provectus/kafka-ui/issues)
required: true
- label: I'm running a supported version of the application which is listed [here](https://github.com/provectus/kafka-ui/blob/master/SECURITY.md) and the feature is not present there
required: true
- type: textarea
attributes:
label: Is your proposal related to a problem?
description: |
Provide a clear and concise description of what the problem is.
For example, "I'm always frustrated when..."
validations:
required: false
- type: textarea
attributes:
label: Describe the feature you're interested in
description: |
Provide a clear and concise description of what you want to happen.
validations:
required: true
- type: textarea
attributes:
label: Describe alternatives you've considered
description: |
Let us know about other solutions you've tried or researched.
validations:
required: false
- type: input
attributes:
label: Version you're running
description: |
Please provide the app version you're currently running:
1. App version (commit hash in the top left corner of the UI)
validations:
required: true
- type: textarea
attributes:
label: Additional context
description: |
Is there anything else you can add about the proposal?
You might want to link to related issues here, if you haven't already.
validations:
required: false

@@ -0,0 +1,35 @@
---
name: "\U0001F680 Feature request"
about: Propose a new feature
title: ''
labels: status/triage, type/feature
assignees: ''
---
### Is your proposal related to a problem?
<!--
Provide a clear and concise description of what the problem is.
For example, "I'm always frustrated when..."
-->
### Describe the solution you'd like
<!--
Provide a clear and concise description of what you want to happen.
-->
### Describe alternatives you've considered
<!--
Let us know about other solutions you've tried or researched.
-->
### Additional context
<!--
Is there anything else you can add about the proposal?
You might want to link to related issues here, if you haven't already.
-->

.github/ISSUE_TEMPLATE/k8s_whine.md

@@ -0,0 +1,40 @@
---
name: "⎈ K8s/Helm problem report"
about: Report a problem with k8s/helm charts/etc
title: ''
labels: scope/k8s, status/triage
assignees: azatsafin, 5hin0bi
---
**Describe the bug**
<!--(A clear and concise description of what the bug is.)-->
**Set up**
<!--
(How do you run the app?
Which version of the app are you running? Provide either docker image version or check commit hash at the top left corner. We won't be able to help you without this information.)
-->
**Steps to Reproduce**
Steps to reproduce the behavior:
1.
**Expected behavior**
<!--
(A clear and concise description of what you expected to happen)
-->
**Screenshots**
<!--
(If applicable, add screenshots to help explain your problem)
-->
**Additional context**
<!--
(Add any other context about the problem here)
-->

@@ -8,6 +8,8 @@ updates:
timezone: Europe/Moscow
reviewers:
- "Haarolean"
assignees:
- "Haarolean"
labels:
- "scope/backend"
- "type/dependencies"
@@ -97,6 +99,8 @@ updates:
timezone: Europe/Moscow
reviewers:
- "Haarolean"
assignees:
- "Haarolean"
labels:
- "scope/infrastructure"
- "type/dependencies"

@@ -9,33 +9,21 @@ template: |
exclude-labels:
- 'scope/infrastructure'
- 'scope/QA'
- 'scope/AQA'
- 'type/dependencies'
- 'type/chore'
- 'type/documentation'
- 'type/refactoring'
categories:
- title: '🚩 Breaking Changes'
labels:
- 'impact/changelog'
- title: '⚙Features'
labels:
- 'type/feature'
- title: '🪛Enhancements'
labels:
- 'type/enhancement'
- title: '🔨Bug Fixes'
labels:
- 'type/bug'
- title: 'Security'
labels:
- 'type/security'
- title: '⎈ Helm/K8S Changes'
labels:
- 'scope/k8s'
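
For context: release-drafter buckets merged PRs into changelog sections by label, and drops any PR carrying an `exclude-labels` entry from the draft entirely. A minimal sketch of the mapping used above:

exclude-labels:
  - 'type/chore' # PRs with this label never appear in the draft
categories:
  - title: '🔨Bug Fixes'
    labels:
      - 'type/bug' # PRs labeled type/bug are grouped under this heading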

@@ -1,4 +1,4 @@
name: "Infra: Release: AWS Marketplace Publisher"
name: AWS Marketplace Publisher
on:
workflow_dispatch:
inputs:
@@ -10,11 +10,6 @@ on:
description: 'Version of KafkaUI'
required: true
default: '0.3.2'
PublishOnMarketplace:
description: 'If set to true, the request to update AWS Server product version will be raised'
required: true
default: false
type: boolean
jobs:
build-ami:
@@ -24,14 +19,14 @@ jobs:
- name: Clone infra repo
run: |
echo "Cloning repo..."
git clone https://infra-tech:${{ secrets.INFRA_USER_ACCESS_TOKEN }}@github.com/provectus/kafka-ui-infra.git --branch ${{ github.event.inputs.KafkaUIInfraBranch }}
git clone https://kafka-ui-infra:${{ secrets.KAFKA_UI_INFRA_TOKEN }}@gitlab.provectus.com/provectus-internals/kafka-ui-infra.git --branch ${{ github.event.inputs.KafkaUIInfraBranch }}
echo "Cd to packer DIR..."
cd kafka-ui-infra/ami
echo "WORK_DIR=$(pwd)" >> $GITHUB_ENV
echo "Packer will be triggered in this dir $WORK_DIR"
- name: Configure AWS credentials for Kafka-UI account
uses: aws-actions/configure-aws-credentials@v3
uses: aws-actions/configure-aws-credentials@v1
with:
aws-access-key-id: ${{ secrets.AWS_AMI_PUBLISH_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_AMI_PUBLISH_KEY_SECRET }}
@@ -51,27 +46,6 @@ jobs:
with:
command: build
arguments: "-color=false -on-error=abort -var=kafka_ui_release_version=${{ github.event.inputs.KafkaUIReleaseVersion }}"
target: kafka-ui.pkr.hcl
working_directory: ${{ env.WORK_DIR }}
target: kafka-ui-infra/ami/kafka-ui.pkr.hcl
env:
PACKER_LOG: 1
# add fresh AMI to AWS Marketplace
- name: Publish Artifact at Marketplace
if: ${{ github.event.inputs.PublishOnMarketplace == 'true' }}
env:
PRODUCT_ID: ${{ secrets.AWS_SERVER_PRODUCT_ID }}
RELEASE_VERSION: "${{ github.event.inputs.KafkaUIReleaseVersion }}"
RELEASE_NOTES: "https://github.com/provectus/kafka-ui/releases/tag/v${{ github.event.inputs.KafkaUIReleaseVersion }}"
MP_ROLE_ARN: ${{ secrets.AWS_MARKETPLACE_AMI_ACCESS_ROLE }} # https://docs.aws.amazon.com/marketplace/latest/userguide/ami-single-ami-products.html#single-ami-marketplace-ami-access
AMI_OS_VERSION: "amzn2-ami-kernel-5.10-hvm-*-x86_64-gp2"
run: |
set -x
pwd
ls -la kafka-ui-infra/ami
echo $WORK_DIR/manifest.json
export AMI_ID=$(jq -r '.builds[-1].artifact_id' kafka-ui-infra/ami/manifest.json | cut -d ":" -f2)
/bin/bash kafka-ui-infra/aws-marketplace/prepare_changeset.sh > changeset.json
aws marketplace-catalog start-change-set \
--catalog "AWSMarketplace" \
--change-set "$(cat changeset.json)"
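
Packer records the produced artifact in `manifest.json` as `<region>:<ami-id>`, which is why the publish step above pipes `jq` through `cut`. The same extraction as a standalone step (the sample manifest line in the comment is illustrative):

- name: Extract AMI id from packer manifest (sketch)
  run: |
    # manifest.json ends with e.g. {"builds":[{"artifact_id":"us-east-1:ami-0abc1234"}]}
    export AMI_ID=$(jq -r '.builds[-1].artifact_id' kafka-ui-infra/ami/manifest.json | cut -d ":" -f2)
    echo "AMI_ID=${AMI_ID}" # -> ami-0abc1234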

@@ -1,4 +1,4 @@
name: "Backend: PR/master build & test"
name: backend
on:
push:
branches:
@@ -8,49 +8,49 @@ on:
paths:
- "kafka-ui-api/**"
- "pom.xml"
permissions:
checks: write
pull-requests: write
jobs:
build-and-test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
with:
fetch-depth: 0
ref: ${{ github.event.pull_request.head.sha }}
- name: Set up JDK
uses: actions/setup-java@v3
- name: Cache local Maven repository
uses: actions/cache@v2
with:
java-version: '17'
distribution: 'zulu'
cache: 'maven'
path: ~/.m2/repository
key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}
restore-keys: |
${{ runner.os }}-maven-
- name: Set up JDK 1.13
uses: actions/setup-java@v1
with:
java-version: 1.13
- name: Cache SonarCloud packages
uses: actions/cache@v3
uses: actions/cache@v1
with:
path: ~/.sonar/cache
key: ${{ runner.os }}-sonar
restore-keys: ${{ runner.os }}-sonar
- name: Build and analyze pull request target
if: ${{ github.event_name == 'pull_request' }}
if: ${{ github.event_name == 'pull_request_target' }}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
SONAR_TOKEN: ${{ secrets.SONAR_TOKEN_BACKEND }}
HEAD_REF: ${{ github.head_ref }}
BASE_REF: ${{ github.base_ref }}
run: |
./mvnw -B -ntp versions:set -DnewVersion=${{ github.event.pull_request.head.sha }}
./mvnw -B -V -ntp verify org.sonarsource.scanner.maven:sonar-maven-plugin:sonar \
mvn versions:set -DnewVersion=${{ github.event.pull_request.head.sha }}
mvn -B verify org.sonarsource.scanner.maven:sonar-maven-plugin:sonar \
-Dsonar.projectKey=com.provectus:kafka-ui_backend \
-Dsonar.pullrequest.key=${{ github.event.pull_request.number }} \
-Dsonar.pullrequest.branch=$HEAD_REF \
-Dsonar.pullrequest.base=$BASE_REF
-Dsonar.pullrequest.branch=${{ github.head_ref }} \
-Dsonar.pullrequest.base=${{ github.base_ref }}
- name: Build and analyze push master
if: ${{ github.event_name == 'push' }}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
SONAR_TOKEN: ${{ secrets.SONAR_TOKEN_BACKEND }}
run: |
./mvnw -B -ntp versions:set -DnewVersion=$GITHUB_SHA
./mvnw -B -V -ntp verify org.sonarsource.scanner.maven:sonar-maven-plugin:sonar \
mvn versions:set -DnewVersion=$GITHUB_SHA
mvn -B verify org.sonarsource.scanner.maven:sonar-maven-plugin:sonar \
-Dsonar.projectKey=com.provectus:kafka-ui_backend
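
Note the pattern on the newer side of this hunk: `github.head_ref` and `github.base_ref` are passed in via `env` (`HEAD_REF`, `BASE_REF`) and referenced as shell variables instead of being interpolated into the `run:` script. This is the usual guard against script injection through attacker-controlled branch names. A minimal sketch:

- name: Safe use of untrusted context values (sketch)
  env:
    HEAD_REF: ${{ github.head_ref }} # may contain characters chosen by the PR author
  run: |
    echo "Analyzing branch: $HEAD_REF" # expanded by the shell, never by the workflow templater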

@@ -1,4 +1,4 @@
name: "Infra: PR block merge"
name: Pull Request Labels
on:
pull_request:
types: [opened, labeled, unlabeled, synchronize]
@@ -6,7 +6,7 @@ jobs:
block_merge:
runs-on: ubuntu-latest
steps:
- uses: mheap/github-action-required-labels@v5
- uses: mheap/github-action-required-labels@v1
with:
mode: exactly
count: 0
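
With `mode: exactly` and `count: 0`, this action fails the check while any of its configured labels is present, which is how the merge gets blocked. The `labels` input is truncated out of this hunk, so the value below is hypothetical:

- uses: mheap/github-action-required-labels@v5
  with:
    mode: exactly
    count: 0
    labels: "status/blocked" # hypothetical; the real list is not visible in this hunk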

@@ -1,4 +1,4 @@
name: "Infra: Feature Testing: Init env"
name: DeployFromBranch
on:
workflow_dispatch:
@@ -9,43 +9,50 @@ jobs:
if: ${{ github.event.label.name == 'status/feature_testing' || github.event.label.name == 'status/feature_testing_public' }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
ref: ${{ github.event.pull_request.head.sha }}
- uses: actions/checkout@v2
- name: get branch name
id: extract_branch
run: |
tag='pr${{ github.event.pull_request.number }}'
echo "tag=${tag}" >> $GITHUB_OUTPUT
hub pr checkout ${{ github.event.pull_request.number }}
branch_name=$(hub branch | grep "*" | sed -e 's/^\*//')
echo $branch_name
echo ::set-output name=branch::${branch_name}
tag=$(echo $branch_name | sed 's/\//-/g' | sed 's/\./-/g' | sed 's/\_/-/g' | sed -e 's/\(.*\)/\L\1/' | cut -c1-32 | sed -E 's/(^[^a-z0-9])*([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*)([^a-z0-9]*)/\2/')
echo ::set-output name=tag::${tag}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Set up JDK
uses: actions/setup-java@v3
- name: Cache local Maven repository
uses: actions/cache@v2
with:
java-version: '17'
distribution: 'zulu'
cache: 'maven'
path: ~/.m2/repository
key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}
restore-keys: |
${{ runner.os }}-maven-
- name: Set up JDK 1.13
uses: actions/setup-java@v1
with:
java-version: 1.13
- name: Build
id: build
run: |
./mvnw -B -ntp versions:set -DnewVersion=$GITHUB_SHA
./mvnw -B -V -ntp clean package -Pprod -DskipTests
export VERSION=$(./mvnw -q -Dexec.executable=echo -Dexec.args='${project.version}' --non-recursive exec:exec)
echo "version=${VERSION}" >> $GITHUB_OUTPUT
mvn versions:set -DnewVersion=$GITHUB_SHA
mvn clean package -Pprod -DskipTests
export VERSION=$(mvn -q -Dexec.executable=echo -Dexec.args='${project.version}' --non-recursive exec:exec)
echo "::set-output name=version::${VERSION}"
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
uses: docker/setup-qemu-action@v1
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v2
uses: docker/setup-buildx-action@v1
- name: Cache Docker layers
uses: actions/cache@v3
uses: actions/cache@v2
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}
restore-keys: |
${{ runner.os }}-buildx-
- name: Configure AWS credentials for Kafka-UI account
uses: aws-actions/configure-aws-credentials@v3
uses: aws-actions/configure-aws-credentials@v1
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
@@ -55,7 +62,7 @@ jobs:
uses: aws-actions/amazon-ecr-login@v1
- name: Build and push
id: docker_build_and_push
uses: docker/build-push-action@v4
uses: docker/build-push-action@v2
with:
builder: ${{ steps.buildx.outputs.name }}
context: kafka-ui-api
@@ -73,33 +80,29 @@ jobs:
steps:
- name: clone
run: |
git clone https://infra-tech:${{ secrets.INFRA_USER_ACCESS_TOKEN }}@github.com/provectus/kafka-ui-infra.git --branch envs
git clone https://kafka-ui-infra:${{ secrets.KAFKA_UI_INFRA_TOKEN }}@gitlab.provectus.com/provectus-internals/kafka-ui-infra.git
- name: create deployment
run: |
cd kafka-ui-infra/aws-infrastructure4eks/argocd/scripts
echo "Branch:${{ needs.build.outputs.tag }}"
./kafka-ui-deployment-from-branch.sh ${{ needs.build.outputs.tag }} ${{ github.event.label.name }} ${{ secrets.FEATURE_TESTING_UI_PASSWORD }}
git config --global user.email "infra-tech@provectus.com"
git config --global user.name "infra-tech"
git config --global user.email "kafka-ui-infra@provectus.com"
git config --global user.name "kafka-ui-infra"
git add ../kafka-ui-from-branch/
git commit -m "added env:${{ needs.build.outputs.deploy }}" && git push || true
- name: update status check for private deployment
- name: make comment with private deployment link
if: ${{ github.event.label.name == 'status/feature_testing' }}
uses: Sibz/github-status-action@v1.1.6
uses: peter-evans/create-or-update-comment@v2
with:
authToken: ${{secrets.GITHUB_TOKEN}}
context: "Click Details button to open custom deployment page"
state: "success"
sha: ${{ github.event.pull_request.head.sha || github.sha }}
target_url: "http://${{ needs.build.outputs.tag }}.internal.kafka-ui.provectus.io"
issue-number: ${{ github.event.pull_request.number }}
body: |
Custom deployment will be available at http://${{ needs.build.outputs.tag }}.internal.kafka-ui.provectus.io
- name: update status check for public deployment
- name: make comment with public deployment link
if: ${{ github.event.label.name == 'status/feature_testing_public' }}
uses: Sibz/github-status-action@v1.1.6
uses: peter-evans/create-or-update-comment@v2
with:
authToken: ${{secrets.GITHUB_TOKEN}}
context: "Click Details button to open custom deployment page"
state: "success"
sha: ${{ github.event.pull_request.head.sha || github.sha }}
target_url: "http://${{ needs.build.outputs.tag }}.internal.kafka-ui.provectus.io"
issue-number: ${{ github.event.pull_request.number }}
body: |
Custom deployment will be available at http://${{ needs.build.outputs.tag }}.kafka-ui.provectus.io in 5 minutes
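
This hunk shows both generations of step outputs side by side: the deprecated `::set-output` workflow command and the newer append to the `$GITHUB_OUTPUT` file. A minimal sketch of the newer form and how a later step consumes it:

- name: get branch name
  id: extract_branch
  run: |
    tag='pr${{ github.event.pull_request.number }}'
    echo "tag=${tag}" >> "$GITHUB_OUTPUT"
- name: use the output
  run: echo "Deploying ${{ steps.extract_branch.outputs.tag }}"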

@@ -1,22 +1,40 @@
name: "Infra: Feature Testing: Destroy env"
name: RemoveCustomDeployment
on:
workflow_dispatch:
pull_request:
types: ['unlabeled', 'closed']
jobs:
remove:
if: ${{ github.event.label.name == 'status/feature_testing' || github.event.label.name == 'status/feature_testing_public' }}
runs-on: ubuntu-latest
if: ${{ (github.event.label.name == 'status/feature_testing' || github.event.label.name == 'status/feature_testing_public') || (github.event.action == 'closed' && (contains(github.event.pull_request.labels.*.name, 'status/feature_testing') || contains(github.event.pull_request.labels.*.name, 'status/feature_testing_public'))) }}
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
- name: get branch name
id: extract_branch
run: |
hub pr checkout ${{ github.event.pull_request.number }}
branch_name=$(hub branch | grep "*" | sed -e 's/^\*//')
echo $branch_name
echo ::set-output name=branch::${branch_name}
tag=$(echo $branch_name | sed 's/\//-/g' | sed 's/\./-/g' | sed 's/\_/-/g' | cut -c1-32)
echo ::set-output name=tag::${tag}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: clone
run: |
git clone https://infra-tech:${{ secrets.INFRA_USER_ACCESS_TOKEN }}@github.com/provectus/kafka-ui-infra.git --branch envs
git clone https://kafka-ui-infra:${{ secrets.KAFKA_UI_INFRA_TOKEN }}@gitlab.provectus.com/provectus-internals/kafka-ui-infra.git
- name: remove env
run: |
cd kafka-ui-infra/aws-infrastructure4eks/argocd/scripts
./delete-env.sh pr${{ github.event.pull_request.number }} || true
git config --global user.email "infra-tech@provectus.com"
git config --global user.name "infra-tech"
echo "Branch:${{ steps.extract_branch.outputs.tag }}"
./delete-env.sh ${{ steps.extract_branch.outputs.tag }}
git config --global user.email "kafka-ui-infra@provectus.com"
git config --global user.name "kafka-ui-infra"
git add ../kafka-ui-from-branch/
git commit -m "removed env:${{ needs.build.outputs.deploy }}" && git push || true
- name: make comment with deployment link
uses: peter-evans/create-or-update-comment@v2
with:
issue-number: ${{ github.event.pull_request.number }}
body: |
Custom deployment removed

@@ -1,74 +0,0 @@
name: "Infra: Image Testing: Deploy"
on:
workflow_dispatch:
pull_request:
types: ['labeled']
jobs:
build:
if: ${{ github.event.label.name == 'status/image_testing' }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: get branch name
id: extract_branch
run: |
tag='${{ github.event.pull_request.number }}'
echo "tag=${tag}" >> $GITHUB_OUTPUT
- name: Set up JDK
uses: actions/setup-java@v3
with:
java-version: '17'
distribution: 'zulu'
cache: 'maven'
- name: Build
id: build
run: |
./mvnw -B -ntp versions:set -DnewVersion=$GITHUB_SHA
./mvnw -B -V -ntp clean package -Pprod -DskipTests
export VERSION=$(./mvnw -q -Dexec.executable=echo -Dexec.args='${project.version}' --non-recursive exec:exec)
echo "version=${VERSION}" >> $GITHUB_OUTPUT
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v2
- name: Cache Docker layers
uses: actions/cache@v3
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}
restore-keys: |
${{ runner.os }}-buildx-
- name: Configure AWS credentials for Kafka-UI account
uses: aws-actions/configure-aws-credentials@v3
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: us-east-1
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@v1
with:
registry-type: 'public'
- name: Build and push
id: docker_build_and_push
uses: docker/build-push-action@v4
with:
builder: ${{ steps.buildx.outputs.name }}
context: kafka-ui-api
push: true
tags: public.ecr.aws/provectus/kafka-ui-custom-build:${{ steps.extract_branch.outputs.tag }}
build-args: |
JAR_FILE=kafka-ui-api-${{ steps.build.outputs.version }}.jar
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache
- name: make comment with private deployment link
uses: peter-evans/create-or-update-comment@v3
with:
issue-number: ${{ github.event.pull_request.number }}
body: |
Image published at public.ecr.aws/provectus/kafka-ui-custom-build:${{ steps.extract_branch.outputs.tag }}
outputs:
tag: ${{ steps.extract_branch.outputs.tag }}

@@ -20,8 +20,6 @@ on:
paths:
- 'kafka-ui-contract/**'
- 'kafka-ui-react-app/**'
- 'kafka-ui-api/**'
- 'kafka-ui-serde-api/**'
schedule:
- cron: '39 15 * * 6'
@@ -33,18 +31,18 @@ jobs:
strategy:
fail-fast: false
matrix:
language: [ 'javascript', 'java' ]
language: [ 'javascript' ]
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
# Learn more:
# https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
steps:
- name: Checkout repository
uses: actions/checkout@v3
uses: actions/checkout@v2
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v2
uses: github/codeql-action/init@v1
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
@@ -52,17 +50,10 @@ jobs:
# Prefix the list here with "+" to use these queries and those in the config file.
# queries: ./path/to/local/query, your-org/your-repo/queries@main
- name: Set up JDK
uses: actions/setup-java@v3
with:
java-version: '17'
distribution: 'zulu'
cache: 'maven'
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v2
uses: github/codeql-action/autobuild@v1
# Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl
@@ -76,4 +67,4 @@ jobs:
# make release
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2
uses: github/codeql-action/analyze@v1

@@ -8,31 +8,37 @@ jobs:
build-and-test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
- name: Set up JDK
uses: actions/setup-java@v3
- name: Cache local Maven repository
uses: actions/cache@v2
with:
java-version: '17'
distribution: 'zulu'
cache: 'maven'
path: ~/.m2/repository
key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}
restore-keys: |
${{ runner.os }}-maven-
- name: Set up JDK 1.13
uses: actions/setup-java@v1
with:
java-version: 1.13
- name: Build project
id: build
run: |
./mvnw -B -ntp versions:set -DnewVersion=$GITHUB_SHA
./mvnw -B -V -ntp clean package -DskipTests
export VERSION=$(./mvnw -q -Dexec.executable=echo -Dexec.args='${project.version}' --non-recursive exec:exec)
echo "version=${VERSION}" >> $GITHUB_OUTPUT
mvn versions:set -DnewVersion=$GITHUB_SHA
mvn clean package -DskipTests
export VERSION=$(mvn -q -Dexec.executable=echo -Dexec.args='${project.version}' --non-recursive exec:exec)
echo "::set-output name=version::${VERSION}"
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
uses: docker/setup-qemu-action@v1
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
uses: docker/setup-buildx-action@v1
- name: Cache Docker layers
uses: actions/cache@v3
uses: actions/cache@v2
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}
@@ -40,7 +46,7 @@ jobs:
${{ runner.os }}-buildx-
- name: Build docker image
uses: docker/build-push-action@v4
uses: docker/build-push-action@v2
with:
builder: ${{ steps.buildx.outputs.name }}
context: kafka-ui-api
@@ -55,7 +61,7 @@ jobs:
cache-to: type=local,dest=/tmp/.buildx-cache
- name: Run CVE checks
uses: aquasecurity/trivy-action@0.12.0
uses: aquasecurity/trivy-action@0.2.3
with:
image-ref: "provectuslabs/kafka-ui:${{ steps.build.outputs.version }}"
format: "table"

@@ -1,34 +0,0 @@
name: "Infra: Image Testing: Delete"
on:
workflow_dispatch:
pull_request:
types: ['unlabeled', 'closed']
jobs:
remove:
if: ${{ github.event.label.name == 'status/image_testing' || ( github.event.action == 'closed' && (contains(github.event.pull_request.labels, 'status/image_testing'))) }}
runs-on: ubuntu-latest
steps:
- name: get branch name
id: extract_branch
run: |
echo
tag='${{ github.event.pull_request.number }}'
echo "tag=${tag}" >> $GITHUB_OUTPUT
- name: Configure AWS credentials for Kafka-UI account
uses: aws-actions/configure-aws-credentials@v3
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: us-east-1
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@v1
with:
registry-type: 'public'
- name: Remove from ECR
id: remove_from_ecr
run: |
aws ecr-public batch-delete-image \
--repository-name kafka-ui-custom-build \
--image-ids imageTag=${{ steps.extract_branch.outputs.tag }} \
--region us-east-1

@@ -1,4 +1,4 @@
name: "Infra: Docs: URL linter"
name: Documentation
on:
pull_request:
types:
@@ -13,11 +13,11 @@ jobs:
build-and-test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
- name: Check URLs in files
uses: urlstechie/urlchecker-action@0.0.34
uses: urlstechie/urlchecker-action@0.2.31
with:
exclude_patterns: localhost,127.0.,192.168.
exclude_urls: https://api.server,https://graph.microsoft.com/User.Read,https://dev-a63ggcut.auth0.com/,http://main-schema-registry:8081,http://schema-registry:8081,http://another-yet-schema-registry:8081,http://another-schema-registry:8081
exclude_urls: https://api.server,https://graph.microsoft.com/User.Read,https://dev-a63ggcut.auth0.com/
print_all: false
file_types: .md

@@ -1,88 +0,0 @@
name: "E2E: Automation suite"
on:
workflow_dispatch:
inputs:
test_suite:
description: 'Select test suite to run'
default: 'regression'
required: true
type: choice
options:
- regression
- sanity
- smoke
qase_token:
description: 'Set Qase token to enable integration'
required: false
type: string
jobs:
build-and-test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
ref: ${{ github.sha }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v3
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: eu-central-1
- name: Set up environment
id: set_env_values
run: |
cat "./kafka-ui-e2e-checks/.env.ci" >> "./kafka-ui-e2e-checks/.env"
- name: Pull with Docker
id: pull_chrome
run: |
docker pull selenoid/vnc_chrome:103.0
- name: Set up JDK
uses: actions/setup-java@v3
with:
java-version: '17'
distribution: 'zulu'
cache: 'maven'
- name: Build with Maven
id: build_app
run: |
./mvnw -B -ntp versions:set -DnewVersion=${{ github.sha }}
./mvnw -B -V -ntp clean install -Pprod -Dmaven.test.skip=true ${{ github.event.inputs.extraMavenOptions }}
- name: Compose with Docker
id: compose_app
# use the following command until #819 will be fixed
run: |
docker-compose -f kafka-ui-e2e-checks/docker/selenoid-git.yaml up -d
docker-compose -f ./documentation/compose/e2e-tests.yaml up -d
- name: Run test suite
run: |
./mvnw -B -ntp versions:set -DnewVersion=${{ github.sha }}
./mvnw -B -V -ntp -DQASEIO_API_TOKEN=${{ github.event.inputs.qase_token }} -Dsurefire.suiteXmlFiles='src/test/resources/${{ github.event.inputs.test_suite }}.xml' -Dsuite=${{ github.event.inputs.test_suite }} -f 'kafka-ui-e2e-checks' test -Pprod
- name: Generate Allure report
uses: simple-elf/allure-report-action@master
if: always()
id: allure-report
with:
allure_results: ./kafka-ui-e2e-checks/allure-results
gh_pages: allure-results
allure_report: allure-report
subfolder: allure-results
report_url: "http://kafkaui-allure-reports.s3-website.eu-central-1.amazonaws.com"
- uses: jakejarvis/s3-sync-action@master
if: always()
env:
AWS_S3_BUCKET: 'kafkaui-allure-reports'
AWS_REGION: 'eu-central-1'
SOURCE_DIR: 'allure-history/allure-results'
- name: Deploy report to Amazon S3
if: always()
uses: Sibz/github-status-action@v1.1.6
with:
authToken: ${{secrets.GITHUB_TOKEN}}
context: "Click Details button to open Allure report"
state: "success"
sha: ${{ github.sha }}
target_url: http://kafkaui-allure-reports.s3-website.eu-central-1.amazonaws.com/${{ github.run_number }}
- name: Dump Docker logs on failure
if: failure()
uses: jwalton/gh-docker-logs@v2.2.1

@@ -1,57 +1,53 @@
name: "E2E: PR healthcheck"
name: e2e-checks
on:
pull_request_target:
types: [ "opened", "edited", "reopened", "synchronize" ]
types: ["opened", "edited", "reopened", "synchronize"]
paths:
- "kafka-ui-api/**"
- "kafka-ui-contract/**"
- "kafka-ui-react-app/**"
- "kafka-ui-e2e-checks/**"
- "pom.xml"
permissions:
statuses: write
jobs:
build-and-test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v3
- name: Cache local Maven repository
uses: actions/cache@v2
with:
aws-access-key-id: ${{ secrets.S3_AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.S3_AWS_SECRET_ACCESS_KEY }}
aws-region: eu-central-1
- name: Set up environment
path: ~/.m2/repository
key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}
restore-keys: |
${{ runner.os }}-maven-
- name: Set the values
id: set_env_values
run: |
cat "./kafka-ui-e2e-checks/.env.ci" >> "./kafka-ui-e2e-checks/.env"
- name: Pull with Docker
id: pull_chrome
- name: pull docker
id: pull_selenoid
run: |
docker pull selenoid/vnc_chrome:103.0
- name: Set up JDK
uses: actions/setup-java@v3
docker pull selenoid/vnc:chrome_86.0
- name: Set up JDK 1.13
uses: actions/setup-java@v1
with:
java-version: '17'
distribution: 'zulu'
cache: 'maven'
java-version: 1.13
- name: Build with Maven
id: build_app
run: |
./mvnw -B -ntp versions:set -DnewVersion=${{ github.event.pull_request.head.sha }}
./mvnw -B -V -ntp clean install -Pprod -Dmaven.test.skip=true ${{ github.event.inputs.extraMavenOptions }}
- name: Compose with Docker
mvn versions:set -DnewVersion=${{ github.event.pull_request.head.sha }}
mvn clean package -DskipTests ${{ github.event.inputs.extraMavenOptions }}
- name: compose app
id: compose_app
# use the following command until #819 will be fixed
run: |
docker-compose -f kafka-ui-e2e-checks/docker/selenoid-git.yaml up -d
docker-compose -f ./documentation/compose/e2e-tests.yaml up -d && until [ "$(docker exec kafka-ui wget --spider --server-response http://localhost:8080/actuator/health 2>&1 | grep -c 'HTTP/1.1 200 OK')" == "1" ]; do echo "Waiting for kafka-ui ..." && sleep 1; done
- name: Run test suite
docker-compose -f ./documentation/compose/kafka-ui-connectors.yaml up -d
- name: e2e run
run: |
./mvnw -B -ntp versions:set -DnewVersion=${{ github.event.pull_request.head.sha }}
./mvnw -B -V -ntp -Dsurefire.suiteXmlFiles='src/test/resources/smoke.xml' -f 'kafka-ui-e2e-checks' test -Pprod
mvn versions:set -DnewVersion=${{ github.event.pull_request.head.sha }}
mvn -pl '!kafka-ui-api' test -Pprod
- name: Generate allure report
uses: simple-elf/allure-report-action@master
if: always()
@@ -61,22 +57,23 @@ jobs:
gh_pages: allure-results
allure_report: allure-report
subfolder: allure-results
report_url: "http://kafkaui-allure-reports.s3-website.eu-central-1.amazonaws.com"
- uses: jakejarvis/s3-sync-action@master
- name: Deploy allure report to Github Pages
if: always()
env:
AWS_S3_BUCKET: 'kafkaui-allure-reports'
AWS_REGION: 'eu-central-1'
SOURCE_DIR: 'allure-history/allure-results'
- name: Deploy report to Amazon S3
uses: peaceiris/actions-gh-pages@v3
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
publish_dir: allure-history
publish_branch: gh-pages
destination_dir: ./allure
- name: Post the link to allure report
if: always()
uses: Sibz/github-status-action@v1.1.6
with:
authToken: ${{secrets.GITHUB_TOKEN}}
context: "Click Details button to open Allure report"
context: "Test report"
state: "success"
sha: ${{ github.event.pull_request.head.sha || github.sha }}
target_url: http://kafkaui-allure-reports.s3-website.eu-central-1.amazonaws.com/${{ github.run_number }}
target_url: https://${{ github.repository_owner }}.github.io/kafka-ui/allure/allure-results/${{ github.run_number }}
- name: Dump docker logs on failure
if: failure()
uses: jwalton/gh-docker-logs@v2.2.1
uses: jwalton/gh-docker-logs@v2.2.0
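
The newer `compose app` step above packs a readiness probe into one line. Unrolled, the same logic reads as follows (container name and endpoint are copied from that command):

- name: wait for kafka-ui readiness (unrolled sketch)
  run: |
    # poll the actuator health endpoint once per second until it answers 200 OK
    until [ "$(docker exec kafka-ui wget --spider --server-response http://localhost:8080/actuator/health 2>&1 | grep -c 'HTTP/1.1 200 OK')" == "1" ]; do
      echo "Waiting for kafka-ui ..."
      sleep 1
    done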

@@ -1,43 +0,0 @@
name: "E2E: Manual suite"
on:
workflow_dispatch:
inputs:
test_suite:
description: 'Select test suite to run'
default: 'manual'
required: true
type: choice
options:
- manual
- qase
qase_token:
description: 'Set Qase token to enable integration'
required: true
type: string
jobs:
build-and-test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
ref: ${{ github.sha }}
- name: Set up environment
id: set_env_values
run: |
cat "./kafka-ui-e2e-checks/.env.ci" >> "./kafka-ui-e2e-checks/.env"
- name: Set up JDK
uses: actions/setup-java@v3
with:
java-version: '17'
distribution: 'zulu'
cache: 'maven'
- name: Build with Maven
id: build_app
run: |
./mvnw -B -ntp versions:set -DnewVersion=${{ github.sha }}
./mvnw -B -V -ntp clean install -Pprod -Dmaven.test.skip=true ${{ github.event.inputs.extraMavenOptions }}
- name: Run test suite
run: |
./mvnw -B -ntp versions:set -DnewVersion=${{ github.sha }}
./mvnw -B -V -ntp -DQASEIO_API_TOKEN=${{ github.event.inputs.qase_token }} -Dsurefire.suiteXmlFiles='src/test/resources/${{ github.event.inputs.test_suite }}.xml' -Dsuite=${{ github.event.inputs.test_suite }} -f 'kafka-ui-e2e-checks' test -Pprod

@@ -1,75 +0,0 @@
name: "E2E: Weekly suite"
on:
schedule:
- cron: '0 1 * * 1'
jobs:
build-and-test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
ref: ${{ github.sha }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v3
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: eu-central-1
- name: Set up environment
id: set_env_values
run: |
cat "./kafka-ui-e2e-checks/.env.ci" >> "./kafka-ui-e2e-checks/.env"
- name: Pull with Docker
id: pull_chrome
run: |
docker pull selenoid/vnc_chrome:103.0
- name: Set up JDK
uses: actions/setup-java@v3
with:
java-version: '17'
distribution: 'zulu'
cache: 'maven'
- name: Build with Maven
id: build_app
run: |
./mvnw -B -ntp versions:set -DnewVersion=${{ github.sha }}
./mvnw -B -V -ntp clean install -Pprod -Dmaven.test.skip=true ${{ github.event.inputs.extraMavenOptions }}
- name: Compose with Docker
id: compose_app
# use the following command until #819 will be fixed
run: |
docker-compose -f kafka-ui-e2e-checks/docker/selenoid-git.yaml up -d
docker-compose -f ./documentation/compose/e2e-tests.yaml up -d
- name: Run test suite
run: |
./mvnw -B -ntp versions:set -DnewVersion=${{ github.sha }}
./mvnw -B -V -ntp -DQASEIO_API_TOKEN=${{ secrets.QASEIO_API_TOKEN }} -Dsurefire.suiteXmlFiles='src/test/resources/sanity.xml' -Dsuite=weekly -f 'kafka-ui-e2e-checks' test -Pprod
- name: Generate Allure report
uses: simple-elf/allure-report-action@master
if: always()
id: allure-report
with:
allure_results: ./kafka-ui-e2e-checks/allure-results
gh_pages: allure-results
allure_report: allure-report
subfolder: allure-results
report_url: "http://kafkaui-allure-reports.s3-website.eu-central-1.amazonaws.com"
- uses: jakejarvis/s3-sync-action@master
if: always()
env:
AWS_S3_BUCKET: 'kafkaui-allure-reports'
AWS_REGION: 'eu-central-1'
SOURCE_DIR: 'allure-history/allure-results'
- name: Deploy report to Amazon S3
if: always()
uses: Sibz/github-status-action@v1.1.6
with:
authToken: ${{secrets.GITHUB_TOKEN}}
context: "Click Details button to open Allure report"
state: "success"
sha: ${{ github.sha }}
target_url: http://kafkaui-allure-reports.s3-website.eu-central-1.amazonaws.com/${{ github.run_number }}
- name: Dump Docker logs on failure
if: failure()
uses: jwalton/gh-docker-logs@v2.2.1

@@ -1,4 +1,4 @@
name: "Frontend: PR/master build & test"
name: frontend
on:
push:
branches:
@@ -8,9 +8,6 @@ on:
paths:
- "kafka-ui-contract/**"
- "kafka-ui-react-app/**"
permissions:
checks: write
pull-requests: write
jobs:
build-and-test:
env:
@@ -18,38 +15,40 @@ jobs:
NODE_ENV: dev
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
with:
# Disabling shallow clone is recommended for improving relevancy of reporting
fetch-depth: 0
ref: ${{ github.event.pull_request.head.sha }}
- uses: pnpm/action-setup@v2.4.0
- name: Use Node.js
uses: actions/setup-node@v3.1.1
with:
version: 8.6.12
- name: Install node
uses: actions/setup-node@v3.8.1
node-version: "14"
- name: Cache node dependency
uses: actions/cache@v1
with:
node-version: "18.17.1"
cache: "pnpm"
cache-dependency-path: "./kafka-ui-react-app/pnpm-lock.yaml"
path: kafka-ui-react-app/node_modules
key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
restore-keys: |
${{ runner.os }}-node-
- name: Install Node dependencies
run: |
cd kafka-ui-react-app/
pnpm install --frozen-lockfile
npm install
- name: Generate sources
run: |
cd kafka-ui-react-app/
pnpm gen:sources
npm run gen:sources
- name: Linter
run: |
cd kafka-ui-react-app/
pnpm lint:CI
npm run lint:CI
- name: Tests
run: |
cd kafka-ui-react-app/
pnpm test:CI
npm run test:CI
- name: SonarCloud Scan
uses: sonarsource/sonarcloud-github-action@master
uses: workshur/sonarcloud-github-action@improved_basedir
with:
projectBaseDir: ./kafka-ui-react-app
args: -Dsonar.pullrequest.key=${{ github.event.pull_request.number }} -Dsonar.pullrequest.branch=${{ github.head_ref }} -Dsonar.pullrequest.base=${{ github.base_ref }}
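
On the newer side of this diff, `pnpm/action-setup` runs before `actions/setup-node`. The order matters: `setup-node` with `cache: "pnpm"` invokes pnpm to compute the cache key and fails if the binary is not installed yet. A minimal sketch of the working order:

- uses: pnpm/action-setup@v2.4.0
  with:
    version: 8.6.12 # must come first
- uses: actions/setup-node@v3.8.1
  with:
    node-version: "18.17.1"
    cache: "pnpm" # needs the pnpm binary from the previous step
    cache-dependency-path: "./kafka-ui-react-app/pnpm-lock.yaml"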

.github/workflows/helm.yaml

@@ -0,0 +1,32 @@
name: Helm
on:
pull_request:
types: [ 'opened', 'edited', 'reopened', 'synchronize' ]
paths:
- "charts/**"
schedule:
# * is a special character in YAML so you have to quote this string
- cron: '0 8 * * 3'
jobs:
build-and-test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Helm tool installer
uses: Azure/setup-helm@v1
- name: Setup Kubeval
uses: lra/setup-kubeval@v1.0.1
- name: Run kubeval
shell: bash
run: |
sed -i "s@enabled: false@enabled: true@g" charts/kafka-ui/values.yaml
K8S_VERSIONS=$(git ls-remote --refs --tags https://github.com/kubernetes/kubernetes.git | cut -d/ -f3 | grep -e '^v1\.[0-9]\{2\}\.[0]\{1,2\}$' | grep -v -e '^v1\.1[0-8]\{1\}' | cut -c2-)
echo "NEXT K8S VERSIONS ARE GOING TO BE TESTED: $K8S_VERSIONS"
echo ""
for version in $K8S_VERSIONS
do
echo $version;
helm template charts/kafka-ui -f charts/kafka-ui/values.yaml | kubeval --additional-schema-locations https://raw.githubusercontent.com/yannh/kubernetes-json-schema/master --strict -v $version;
done
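
The loop above renders the chart once per recent Kubernetes minor release and validates the manifests against that release's schemas. For a single version the check collapses to the step below (the version number is illustrative):

- name: validate chart against one k8s version (sketch)
  run: |
    helm template charts/kafka-ui -f charts/kafka-ui/values.yaml \
      | kubeval --strict -v 1.25.0 \
        --additional-schema-locations https://raw.githubusercontent.com/yannh/kubernetes-json-schema/master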

@@ -1,4 +1,4 @@
name: "Master: Build & deploy"
name: Master
on:
workflow_dispatch:
push:
@@ -8,37 +8,41 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
ref: ${{ github.event.pull_request.head.sha }}
- uses: actions/checkout@v2
- name: Set up JDK
uses: actions/setup-java@v3
- name: Cache local Maven repository
uses: actions/cache@v2
with:
java-version: '17'
distribution: 'zulu'
cache: 'maven'
path: ~/.m2/repository
key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}
restore-keys: |
${{ runner.os }}-maven-
- name: Set up JDK 1.13
uses: actions/setup-java@v1
with:
java-version: 1.13
- name: Build
id: build
run: |
./mvnw -B -ntp versions:set -DnewVersion=$GITHUB_SHA
./mvnw -V -B -ntp clean package -Pprod -DskipTests
export VERSION=$(./mvnw -q -Dexec.executable=echo -Dexec.args='${project.version}' --non-recursive exec:exec)
echo "version=${VERSION}" >> $GITHUB_OUTPUT
mvn versions:set -DnewVersion=$GITHUB_SHA
mvn clean package -Pprod -DskipTests
export VERSION=$(mvn -q -Dexec.executable=echo -Dexec.args='${project.version}' --non-recursive exec:exec)
echo "::set-output name=version::${VERSION}"
#################
# #
# Docker images #
# #
#################
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
uses: docker/setup-qemu-action@v1
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
uses: docker/setup-buildx-action@v1
- name: Cache Docker layers
uses: actions/cache@v3
uses: actions/cache@v2
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}
@@ -46,19 +50,18 @@ jobs:
${{ runner.os }}-buildx-
- name: Login to DockerHub
uses: docker/login-action@v2
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push
id: docker_build_and_push
uses: docker/build-push-action@v4
uses: docker/build-push-action@v2
with:
builder: ${{ steps.buildx.outputs.name }}
context: kafka-ui-api
platforms: linux/amd64,linux/arm64
provenance: false
push: true
tags: |
provectuslabs/kafka-ui:${{ steps.build.outputs.version }}
@ -74,11 +77,11 @@ jobs:
#################################
- name: update-master-deployment
run: |
git clone https://infra-tech:${{ secrets.INFRA_USER_ACCESS_TOKEN }}@github.com/provectus/kafka-ui-infra.git --branch master
git clone https://kafka-ui-infra:${{ secrets.KAFKA_UI_INFRA_TOKEN }}@gitlab.provectus.com/provectus-internals/kafka-ui-infra.git --branch master
cd kafka-ui-infra/aws-infrastructure4eks/argocd/scripts
echo "Image digest is:${{ steps.docker_build_and_push.outputs.digest }}"
./kafka-ui-update-master-digest.sh ${{ steps.docker_build_and_push.outputs.digest }}
git config --global user.email "infra-tech@provectus.com"
git config --global user.name "infra-tech"
git config --global user.email "kafka-ui-infra@provectus.com"
git config --global user.name "kafka-ui-infra"
git add ../kafka-ui/*
git commit -m "updated master image digest: ${{ steps.docker_build_and_push.outputs.digest }}" && git push

View file

@ -1,14 +1,13 @@
name: "PR: Checklist linter"
name: "PR Checklist checked"
on:
pull_request_target:
types: [opened, edited, synchronize, reopened]
permissions:
checks: write
jobs:
task-check:
runs-on: ubuntu-latest
steps:
- uses: kentaro-m/task-completed-checker-action@v0.1.2
- uses: kentaro-m/task-completed-checker-action@v0.1.0
with:
repo-token: "${{ secrets.GITHUB_TOKEN }}"
- uses: dekinderfiets/pr-description-enforcer@0.0.1

View file

@ -1,30 +0,0 @@
name: "Infra: Release: Serde API"
on: workflow_dispatch
jobs:
release-serde-api:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
- run: |
git config user.name github-actions
git config user.email github-actions@github.com
- name: Set up JDK
uses: actions/setup-java@v3
with:
java-version: "17"
distribution: "zulu"
cache: "maven"
- id: install-secret-key
name: Install GPG secret key
run: |
cat <(echo -e "${{ secrets.GPG_PRIVATE_KEY }}") | gpg --batch --import
- name: Publish to Maven Central
run: |
mvn source:jar javadoc:jar package gpg:sign -Dgpg.passphrase=${{ secrets.GPG_PASSPHRASE }} -Dserver.username=${{ secrets.NEXUS_USERNAME }} -Dserver.password=${{ secrets.NEXUS_PASSWORD }} nexus-staging:deploy -pl kafka-ui-serde-api -s settings.xml
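
If you reproduce the signing setup locally, the key import can be verified before running the deploy; a sketch, assuming the private key is exported in a `GPG_PRIVATE_KEY` environment variable:

```bash
# Import the secret key, then list secret keys to confirm the import succeeded
cat <(echo -e "$GPG_PRIVATE_KEY") | gpg --batch --import
gpg --list-secret-keys --keyid-format long
```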

View file

@ -1,4 +1,4 @@
name: "Infra: Release"
name: Release
on:
release:
types: [published]
@ -9,32 +9,37 @@ jobs:
outputs:
version: ${{steps.build.outputs.version}}
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
with:
fetch-depth: 0
ref: ${{ github.event.pull_request.head.sha }}
- run: |
git config user.name github-actions
git config user.email github-actions@github.com
- name: Set up JDK
uses: actions/setup-java@v3
- name: Cache local Maven repository
uses: actions/cache@v2
with:
java-version: '17'
distribution: 'zulu'
cache: 'maven'
path: ~/.m2/repository
key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}
restore-keys: |
${{ runner.os }}-maven-
- name: Set up JDK 1.13
uses: actions/setup-java@v1
with:
java-version: 1.13
- name: Build with Maven
id: build
run: |
./mvnw -B -ntp versions:set -DnewVersion=${{ github.event.release.tag_name }}
./mvnw -B -V -ntp clean package -Pprod -DskipTests
export VERSION=$(./mvnw -q -Dexec.executable=echo -Dexec.args='${project.version}' --non-recursive exec:exec)
echo "version=${VERSION}" >> $GITHUB_OUTPUT
mvn versions:set -DnewVersion=${{ github.event.release.tag_name }}
mvn clean package -Pprod -DskipTests
export VERSION=$(mvn -q -Dexec.executable=echo -Dexec.args='${project.version}' --non-recursive exec:exec)
echo ::set-output name=version::${VERSION}
- name: Upload files to a GitHub release
uses: svenstaro/upload-release-action@2.7.0
uses: svenstaro/upload-release-action@2.2.1
with:
repo_token: ${{ secrets.GITHUB_TOKEN }}
file: kafka-ui-api/target/kafka-ui-api-${{ steps.build.outputs.version }}.jar
@ -51,13 +56,13 @@ jobs:
# #
#################
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
uses: docker/setup-qemu-action@v1
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
uses: docker/setup-buildx-action@v1
- name: Cache Docker layers
uses: actions/cache@v3
uses: actions/cache@v2
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}
@ -65,19 +70,18 @@ jobs:
${{ runner.os }}-buildx-
- name: Login to DockerHub
uses: docker/login-action@v2
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push
id: docker_build_and_push
uses: docker/build-push-action@v4
uses: docker/build-push-action@v2
with:
builder: ${{ steps.buildx.outputs.name }}
context: kafka-ui-api
platforms: linux/amd64,linux/arm64
provenance: false
push: true
tags: |
provectuslabs/kafka-ui:${{ steps.build.outputs.version }}
@ -91,10 +95,30 @@ jobs:
runs-on: ubuntu-latest
needs: release
steps:
- name: Repository Dispatch
uses: peter-evans/repository-dispatch@v2
- uses: actions/checkout@v2
with:
token: ${{ secrets.CHARTS_ACTIONS_TOKEN }}
repository: provectus/kafka-ui-charts
event-type: prepare-helm-release
client-payload: '{"appversion": "${{ needs.release.outputs.version }}"}'
fetch-depth: 1
- run: |
git config user.name github-actions
git config user.email github-actions@github.com
- uses: azure/setup-helm@v1
- name: update chart version
run: |
export version=${{needs.release.outputs.version}}
sed -i "s/version:.*/version: ${version}/" charts/kafka-ui/Chart.yaml
sed -i "s/appVersion:.*/appVersion: ${version}/" charts/kafka-ui/Chart.yaml
- name: add chart
run: |
export VERSION=${{needs.release.outputs.version}}
MSG=$(helm package --app-version ${VERSION} charts/kafka-ui)
git fetch origin
git stash
git checkout -b gh-pages origin/gh-pages
helm repo index .
git add -f ${MSG##*/} index.yaml
git commit -m "release ${VERSION}"
git push
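
Done by hand, the chart-publishing steps above boil down to roughly the following (a sketch, assuming a checkout of the repository with a `gh-pages` branch; the version is a placeholder):

```bash
export VERSION=0.0.3                                            # example version
sed -i "s/version:.*/version: ${VERSION}/" charts/kafka-ui/Chart.yaml
MSG=$(helm package --app-version "${VERSION}" charts/kafka-ui)  # prints the packaged .tgz path
git checkout -b gh-pages origin/gh-pages
helm repo index .                                               # rebuild index.yaml over the packaged charts
git add -f "${MSG##*/}" index.yaml
git commit -m "release ${VERSION}" && git push
```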

View file

@ -1,34 +1,19 @@
name: "Infra: Release Drafter run"
name: Release Drafter
on:
push:
# branches to consider in the event; optional, defaults to all
branches:
- master
workflow_dispatch:
inputs:
version:
description: 'Release version'
required: false
branch:
description: 'Target branch'
required: false
default: 'master'
permissions:
contents: read
jobs:
update_release_draft:
runs-on: ubuntu-latest
permissions:
contents: write
pull-requests: write
steps:
- uses: release-drafter/release-drafter@v5
with:
config-name: release_drafter.yaml
disable-autolabeler: true
version: ${{ github.event.inputs.version }}
commitish: ${{ github.event.inputs.branch }}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View file

@ -1,4 +1,4 @@
name: "Infra: Feature Testing Public: Init env"
name: Separate environment create
on:
workflow_dispatch:
inputs:
@ -8,82 +8,19 @@ on:
default: 'demo'
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: get branch name
id: extract_branch
run: |
tag="${{ github.event.inputs.ENV_NAME }}-$(date '+%F-%H-%M-%S')"
echo "tag=${tag}" >> $GITHUB_OUTPUT
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Set up JDK
uses: actions/setup-java@v3
with:
java-version: '17'
distribution: 'zulu'
cache: 'maven'
- name: Build
id: build
run: |
./mvnw -B -ntp versions:set -DnewVersion=$GITHUB_SHA
./mvnw -B -V -ntp clean package -Pprod -DskipTests
export VERSION=$(./mvnw -q -Dexec.executable=echo -Dexec.args='${project.version}' --non-recursive exec:exec)
echo "version=${VERSION}" >> $GITHUB_OUTPUT
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v2
- name: Cache Docker layers
uses: actions/cache@v3
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}
restore-keys: |
${{ runner.os }}-buildx-
- name: Configure AWS credentials for Kafka-UI account
uses: aws-actions/configure-aws-credentials@v3
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: eu-central-1
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@v1
- name: Build and push
id: docker_build_and_push
uses: docker/build-push-action@v4
with:
builder: ${{ steps.buildx.outputs.name }}
context: kafka-ui-api
push: true
tags: 297478128798.dkr.ecr.eu-central-1.amazonaws.com/kafka-ui:${{ steps.extract_branch.outputs.tag }}
build-args: |
JAR_FILE=kafka-ui-api-${{ steps.build.outputs.version }}.jar
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache
outputs:
tag: ${{ steps.extract_branch.outputs.tag }}
separate-env-create:
runs-on: ubuntu-latest
needs: build
steps:
- name: clone
run: |
git clone https://infra-tech:${{ secrets.INFRA_USER_ACCESS_TOKEN }}@github.com/provectus/kafka-ui-infra.git --branch envs
git clone https://kafka-ui-infra:${{ secrets.KAFKA_UI_INFRA_TOKEN }}@gitlab.provectus.com/provectus-internals/kafka-ui-infra.git
- name: separate env create
run: |
cd kafka-ui-infra/aws-infrastructure4eks/argocd/scripts
bash separate_env_create.sh ${{ github.event.inputs.ENV_NAME }} ${{ secrets.FEATURE_TESTING_UI_PASSWORD }} ${{ needs.build.outputs.tag }}
git config --global user.email "infra-tech@provectus.com"
git config --global user.name "infra-tech"
bash separate_env_create.sh ${{ github.event.inputs.ENV_NAME }} ${{ secrets.FEATURE_TESTING_UI_PASSWORD }}
git config --global user.email "kafka-ui-infra@provectus.com"
git config --global user.name "kafka-ui-infra"
git add -A
git commit -m "separate env added: ${{ github.event.inputs.ENV_NAME }}" && git push || true

View file

@ -1,4 +1,4 @@
name: "Infra: Feature Testing Public: Destroy env"
name: Separate environment remove
on:
workflow_dispatch:
inputs:
@ -13,12 +13,12 @@ jobs:
steps:
- name: clone
run: |
git clone https://infra-tech:${{ secrets.INFRA_USER_ACCESS_TOKEN }}@github.com/provectus/kafka-ui-infra.git --branch envs
git clone https://kafka-ui-infra:${{ secrets.KAFKA_UI_INFRA_TOKEN }}@gitlab.provectus.com/provectus-internals/kafka-ui-infra.git
- name: separate environment remove
run: |
cd kafka-ui-infra/aws-infrastructure4eks/argocd/scripts
bash separate_env_remove.sh ${{ github.event.inputs.ENV_NAME }}
git config --global user.email "infra-tech@provectus.com"
git config --global user.name "infra-tech"
git config --global user.email "kafka-ui-infra@provectus.com"
git config --global user.name "kafka-ui-infra"
git add -A
git commit -m "separate env removed: ${{ github.event.inputs.ENV_NAME }}" && git push || true

View file

@ -1,4 +1,4 @@
name: 'Infra: Close stale issues'
name: 'Close stale issues'
on:
schedule:
- cron: '30 1 * * *'
@ -7,7 +7,7 @@ jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v8
- uses: actions/stale@v5
with:
days-before-issue-stale: 7
days-before-issue-close: 3

View file

@ -1,4 +1,4 @@
name: "Infra: Terraform deploy"
name: terraform_deploy
on:
workflow_dispatch:
inputs:
@ -26,14 +26,18 @@ jobs:
echo "Terraform will be triggered in this dir $TF_DIR"
- name: Configure AWS credentials for Kafka-UI account
uses: aws-actions/configure-aws-credentials@v3
uses: aws-actions/configure-aws-credentials@v1
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: eu-central-1
- name: Terraform Install
uses: hashicorp/setup-terraform@v2
uses: hashicorp/setup-terraform@v1
- name: Terraform format
id: fmt
run: cd $TF_DIR && terraform fmt -check
- name: Terraform init
id: init

View file

@ -1,4 +1,4 @@
name: "Infra: Triage: Apply triage label for issues"
name: Add triage label to new issues
on:
issues:
types:

View file

@ -1,4 +1,4 @@
name: "Infra: Triage: Apply triage label for PRs"
name: Add triage label to new PRs
on:
pull_request:
types:

View file

@ -7,9 +7,7 @@ on:
issues:
types:
- opened
permissions:
issues: write
pull-requests: write
jobs:
welcome:
runs-on: ubuntu-latest

View file

@ -1,4 +1,4 @@
name: "Infra: Workflow linter"
name: "Workflow linter"
on:
pull_request:
types:
@ -12,7 +12,7 @@ jobs:
build-and-test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
with:
fetch-depth: 0
ref: ${{ github.event.pull_request.head.sha }}

3
.gitignore vendored
View file

@ -31,9 +31,6 @@ build/
.vscode/
/kafka-ui-api/app/node
### SDKMAN ###
.sdkmanrc
.DS_Store
*.code-workspace

117
.mvn/wrapper/MavenWrapperDownloader.java vendored Normal file
View file

@ -0,0 +1,117 @@
/*
* Copyright 2007-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.net.*;
import java.io.*;
import java.nio.channels.*;
import java.util.Properties;
public class MavenWrapperDownloader {
private static final String WRAPPER_VERSION = "0.5.6";
/**
* Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided.
*/
private static final String DEFAULT_DOWNLOAD_URL = "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/"
+ WRAPPER_VERSION + "/maven-wrapper-" + WRAPPER_VERSION + ".jar";
/**
* Path to the maven-wrapper.properties file, which might contain a downloadUrl property to
* use instead of the default one.
*/
private static final String MAVEN_WRAPPER_PROPERTIES_PATH =
".mvn/wrapper/maven-wrapper.properties";
/**
* Path where the maven-wrapper.jar will be saved to.
*/
private static final String MAVEN_WRAPPER_JAR_PATH =
".mvn/wrapper/maven-wrapper.jar";
/**
* Name of the property which should be used to override the default download url for the wrapper.
*/
private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl";
public static void main(String args[]) {
System.out.println("- Downloader started");
File baseDirectory = new File(args[0]);
System.out.println("- Using base directory: " + baseDirectory.getAbsolutePath());
// If the maven-wrapper.properties exists, read it and check if it contains a custom
// wrapperUrl parameter.
File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH);
String url = DEFAULT_DOWNLOAD_URL;
if(mavenWrapperPropertyFile.exists()) {
FileInputStream mavenWrapperPropertyFileInputStream = null;
try {
mavenWrapperPropertyFileInputStream = new FileInputStream(mavenWrapperPropertyFile);
Properties mavenWrapperProperties = new Properties();
mavenWrapperProperties.load(mavenWrapperPropertyFileInputStream);
url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url);
} catch (IOException e) {
System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'");
} finally {
try {
if(mavenWrapperPropertyFileInputStream != null) {
mavenWrapperPropertyFileInputStream.close();
}
} catch (IOException e) {
// Ignore ...
}
}
}
System.out.println("- Downloading from: " + url);
File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH);
if(!outputFile.getParentFile().exists()) {
if(!outputFile.getParentFile().mkdirs()) {
System.out.println(
"- ERROR creating output directory '" + outputFile.getParentFile().getAbsolutePath() + "'");
}
}
System.out.println("- Downloading to: " + outputFile.getAbsolutePath());
try {
downloadFileFromURL(url, outputFile);
System.out.println("Done");
System.exit(0);
} catch (Throwable e) {
System.out.println("- Error downloading");
e.printStackTrace();
System.exit(1);
}
}
private static void downloadFileFromURL(String urlString, File destination) throws Exception {
if (System.getenv("MVNW_USERNAME") != null && System.getenv("MVNW_PASSWORD") != null) {
String username = System.getenv("MVNW_USERNAME");
char[] password = System.getenv("MVNW_PASSWORD").toCharArray();
Authenticator.setDefault(new Authenticator() {
@Override
protected PasswordAuthentication getPasswordAuthentication() {
return new PasswordAuthentication(username, password);
}
});
}
URL website = new URL(urlString);
ReadableByteChannel rbc;
rbc = Channels.newChannel(website.openStream());
FileOutputStream fos = new FileOutputStream(destination);
fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE);
fos.close();
rbc.close();
}
}

Binary file not shown.

View file

@ -1,18 +1,2 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.8.6/apache-maven-3.8.6-bin.zip
wrapperUrl=https://repo.maven.apache.org/maven2/org/apache/maven/wrapper/maven-wrapper/3.1.1/maven-wrapper-3.1.1.jar
distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.6.3/apache-maven-3.6.3-bin.zip
wrapperUrl=https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar

View file

@ -1,5 +1,3 @@
This guide is an exact copy of the same document located [in our official docs](https://docs.kafka-ui.provectus.io/development/contributing). If there are any differences between the documents, the one located in our official docs should prevail.
This guide aims to walk you through the process of working on issues and Pull Requests (PRs).
Bear in mind that you will not be able to complete some steps on your own if you do not have a “write” permission. Feel free to reach out to the maintainers to help you unlock these activities.
@ -22,7 +20,7 @@ You also need to consider labels. You can sort the issues by scope labels, such
## Grabbing the issue
There are several criteria that make an issue feasible for development. <br/>
The implementation of any features and/or their enhancements should be reasonable, must be backed by justified requirements (demanded by the community, [roadmap](https://docs.kafka-ui.provectus.io/project/roadmap) plans, etc.). The final decision is left for the maintainers' discretion.
The implementation of any features and/or their enhancements should be reasonable, must be backed by justified requirements (demanded by the community, [roadmap](documentation/project/ROADMAP.md) plans, etc.). The final decision is left for the maintainers' discretion.
All bugs should be confirmed as such (i.e. the behavior is unintended).
@ -41,7 +39,7 @@ To keep the status of the issue clear to everyone, please keep the card's status
## Setting up a local development environment
Please refer to [this guide](https://docs.kafka-ui.provectus.io/development/contributing).
Please refer to [this guide](documentation/project/contributing/README.md).
# Pull Requests
@ -80,7 +78,6 @@ When creating a PR please do the following:
4. If the PR does not close any of the issues, the PR itself might need to have a milestone set. Reach out to the maintainers to consult.
5. Assign the PR to yourself. A PR assignee is someone whose goal is to get the PR merged.
6. Add reviewers. As a rule, reviewers' suggestions are pretty good; please use them.
7. Upon merging the PR, please use a meaningful commit message, task name should be fine in this case.
### Pull Request checklist

178
README.md
View file

@ -1,37 +1,21 @@
![UI for Apache Kafka logo](documentation/images/kafka-ui-logo.png) UI for Apache Kafka&nbsp;
------------------
#### Versatile, fast and lightweight web UI for managing Apache Kafka® clusters. Built by developers, for developers.
<br/>
[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://github.com/provectus/kafka-ui/blob/master/LICENSE)
![UI for Apache Kafka Price Free](documentation/images/free-open-source.svg)
[![Release version](https://img.shields.io/github/v/release/provectus/kafka-ui)](https://github.com/provectus/kafka-ui/releases)
[![Chat with us](https://img.shields.io/discord/897805035122077716)](https://discord.gg/4DWzD7pGE5)
[![Docker pulls](https://img.shields.io/docker/pulls/provectuslabs/kafka-ui)](https://hub.docker.com/r/provectuslabs/kafka-ui)
<p align="center">
<a href="https://docs.kafka-ui.provectus.io/">DOCS</a>
<a href="https://docs.kafka-ui.provectus.io/configuration/quick-start">QUICK START</a>
<a href="https://discord.gg/4DWzD7pGE5">COMMUNITY DISCORD</a>
<br/>
<a href="https://aws.amazon.com/marketplace/pp/prodview-ogtt5hfhzkq6a">AWS Marketplace</a>
<a href="https://www.producthunt.com/products/ui-for-apache-kafka/reviews/new">ProductHunt</a>
</p>
<p align="center">
<img src="https://repobeats.axiom.co/api/embed/2e8a7c2d711af9daddd34f9791143e7554c35d0f.svg" />
</p>
#### UI for Apache Kafka is a free, open-source web UI to monitor and manage Apache Kafka clusters.
UI for Apache Kafka is a simple tool that makes your data flows observable, helps you find and troubleshoot issues faster, and delivers optimal performance. Its lightweight dashboard makes it easy to track key metrics of your Kafka clusters - Brokers, Topics, Partitions, Production, and Consumption.
### DISCLAIMER
<em>UI for Apache Kafka is a free tool built and supported by the open-source community. Curated by Provectus, it will remain free and open-source, without any paid features or subscription plans to be added in the future.
Looking for the help of Kafka experts? Provectus can help you design, build, deploy, and manage Apache Kafka clusters and streaming applications. Discover [Professional Services for Apache Kafka](https://provectus.com/professional-services-apache-kafka/), to unlock the full potential of Kafka in your enterprise! </em>
<em>UI for Apache Kafka is a free, open-source tool that is curated by Provectus, and is built and supported by the open-source community. The tool will remain free and open-source in the future. Provectus does not plan to add any paid features or subscription plans so that everyone can have a better experience observing their data. UI for Apache Kafka is a part of the [Provectus NextGen Data Platform](https://provectus.com/nextgen-data-platform/). Check it out for more details! </em>
Set up UI for Apache Kafka with just a couple of easy commands to visualize your Kafka data in a comprehensible way. You can run the tool locally or in
the cloud.
#### UI for Apache Kafka is a free, open-source web UI to monitor and manage Apache Kafka clusters.
UI for Apache Kafka is a simple tool that makes your data flows observable, helps you find and troubleshoot issues faster, and delivers optimal performance. Its lightweight dashboard makes it easy to track key metrics of your Kafka clusters - Brokers, Topics, Partitions, Production, and Consumption.
Set up UI for Apache Kafka with just a couple of easy commands to visualize your Kafka data in a comprehensible way. You can run the tool locally or in
the cloud.
![Interface](documentation/images/Interface.gif)
@ -43,24 +27,21 @@ the cloud.
* **View Consumer Groups** — view per-partition parked offsets, combined and per-partition lag
* **Browse Messages** — browse messages with JSON, plain text, and Avro encoding
* **Dynamic Topic Configuration** — create and configure new topics with dynamic configuration
* **Configurable Authentication** — [secure](https://docs.kafka-ui.provectus.io/configuration/authentication) your installation with optional Github/Gitlab/Google OAuth 2.0
* **Custom serialization/deserialization plugins** - [use](https://docs.kafka-ui.provectus.io/configuration/serialization-serde) a ready-to-go serde for your data like AWS Glue or Smile, or code your own!
* **Role based access control** - [manage permissions](https://docs.kafka-ui.provectus.io/configuration/rbac-role-based-access-control) to access the UI with granular precision
* **Data masking** - [obfuscate](https://docs.kafka-ui.provectus.io/configuration/data-masking) sensitive data in topic messages
* **Configurable Authentication** — secure your installation with optional Github/Gitlab/Google OAuth 2.0
# The Interface
UI for Apache Kafka wraps major functions of Apache Kafka with an intuitive user interface.
![Interface](documentation/images/Interface.gif)
## Topics
UI for Apache Kafka makes it easy for you to create topics in your browser in a few clicks,
pasting your own parameters, and viewing topics in the list.
![Create Topic](documentation/images/Create_topic_kafka-ui.gif)
It's possible to jump from the connectors view to corresponding topics, and from a topic to its consumers (back and forth), for more convenient navigation: you can view connectors and get an overview of topic settings along the way.
![Connector_Topic_Consumer](documentation/images/Connector_Topic_Consumer.gif)
@ -74,68 +55,123 @@ There are 3 supported types of schemas: Avro®, JSON Schema, and Protobuf schema
![Create Schema Registry](documentation/images/Create_schema.gif)
Before producing Avro- or Protobuf-encoded messages, you have to add a schema for the topic in Schema Registry. Now all these steps are easy to do
Before producing avro-encoded messages, you have to add an avro schema for the topic in Schema Registry. Now all these steps are easy to do
with a few clicks in a user-friendly interface.
![Avro Schema Topic](documentation/images/Schema_Topic.gif)
# Getting Started
To run UI for Apache Kafka, you can use either a pre-built Docker image or build it (or a jar file) yourself.
To run UI for Apache Kafka, you can use a pre-built Docker image or build it locally.
## Quick start (Demo run)
## Configuration
We have plenty of [docker-compose files](documentation/compose/DOCKER_COMPOSE.md) as examples. They're built for various configuration stacks.
# Guides
- [SSO configuration](documentation/guides/SSO.md)
- [AWS IAM configuration](documentation/guides/AWS_IAM.md)
- [Docker-compose files](documentation/compose/DOCKER_COMPOSE.md)
- [Connection to a secure broker](documentation/compose/SECURE_BROKER.md)
### Configuration File
Example of how to configure clusters in the [application-local.yml](https://github.com/provectus/kafka-ui/blob/master/kafka-ui-api/src/main/resources/application-local.yml) configuration file:
```yaml
kafka:
clusters:
-
name: local
bootstrapServers: localhost:29091
schemaRegistry: http://localhost:8085
schemaRegistryAuth:
username: username
password: password
# schemaNameTemplate: "%s-value"
jmxPort: 9997
-
```
* `name`: cluster name
* `bootstrapServers`: where to connect
* `schemaRegistry`: schemaRegistry's address
* `schemaRegistryAuth.username`: schemaRegistry's basic authentication username
* `schemaRegistryAuth.password`: schemaRegistry's basic authentication password
* `schemaNameTemplate`: how keys are saved to schemaRegistry
* `jmxPort`: open JMX port of a broker
* `readOnly`: enable read-only mode
Configure as many clusters as you need by adding their configs below separated with `-`.
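
For instance, a two-cluster setup might look like this (a sketch; hosts and ports are placeholders):

```yaml
kafka:
  clusters:
    - name: local
      bootstrapServers: localhost:29091
      schemaRegistry: http://localhost:8085
      jmxPort: 9997
    - name: secondLocal
      bootstrapServers: localhost:29092
      readOnly: true
```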
## Running a Docker Image
The official Docker image for UI for Apache Kafka is hosted here: [hub.docker.com/r/provectuslabs/kafka-ui](https://hub.docker.com/r/provectuslabs/kafka-ui).
Launch the Docker container in the background:
```sh
docker run -p 8080:8080 \
-e KAFKA_CLUSTERS_0_NAME=local \
-e KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=kafka:9092 \
-d provectuslabs/kafka-ui:latest
```
docker run -it -p 8080:8080 -e DYNAMIC_CONFIG_ENABLED=true provectuslabs/kafka-ui
```
Then access the web UI at [http://localhost:8080](http://localhost:8080).
Further configuration with environment variables - [see environment variables](#env_variables)
### Docker Compose
Then access the web UI at [http://localhost:8080](http://localhost:8080)
If you prefer to use `docker-compose` please refer to the [documentation](docker-compose.md).
This command is sufficient to try things out. When you're ready, you can proceed with a [persistent installation](https://docs.kafka-ui.provectus.io/quick-start/persistent-start).
## Persistent installation
## Building With Docker
```yaml
services:
kafka-ui:
container_name: kafka-ui
image: provectuslabs/kafka-ui:latest
ports:
- 8080:8080
environment:
DYNAMIC_CONFIG_ENABLED: 'true'
volumes:
- ~/kui/config.yml:/etc/kafkaui/dynamic_config.yaml
```
### Prerequisites
Please refer to our [configuration](https://docs.kafka-ui.provectus.io/configuration/quick-start) page to proceed with further app configuration.
Check [software-required.md](documentation/project/contributing/software-required.md)
## Some useful configuration-related links
### Building
[Web UI Cluster Configuration Wizard](https://docs.kafka-ui.provectus.io/configuration/configuration-wizard)
Check [building.md](documentation/project/contributing/building.md)
[Configuration file explanation](https://docs.kafka-ui.provectus.io/configuration/configuration-file)
### Running
[Docker Compose examples](https://docs.kafka-ui.provectus.io/configuration/compose-examples)
[Misc configuration properties](https://docs.kafka-ui.provectus.io/configuration/misc-configuration-properties)
## Helm charts
[Quick start](https://docs.kafka-ui.provectus.io/configuration/helm-charts/quick-start)
## Building from sources
[Quick start](https://docs.kafka-ui.provectus.io/development/building/prerequisites) with building
Check [running.md](documentation/project/contributing/running.md)
## Liveness and readiness probes
The liveness and readiness endpoint is at `/actuator/health`.<br/>
Info endpoint (build info) is located at `/actuator/info`.
# Configuration options
## <a name="env_variables"></a> Environment Variables
All of the environment variables/config properties can be found [here](https://docs.kafka-ui.provectus.io/configuration/misc-configuration-properties).
Alternatively, each variable of the .yml file can be set with an environment variable.
For example, if you want to use an environment variable to set the `name` parameter, you can write it like this: `KAFKA_CLUSTERS_2_NAME`
# Contributing
Please refer to [contributing guide](https://docs.kafka-ui.provectus.io/development/contributing), we'll guide you from there.
|Name |Description
|-----------------------|-------------------------------
|`SERVER_SERVLET_CONTEXT_PATH` | URI basePath
|`LOGGING_LEVEL_ROOT` | Setting log level (trace, debug, info, warn, error). Default: info
|`LOGGING_LEVEL_COM_PROVECTUS` |Setting log level (trace, debug, info, warn, error). Default: debug
|`SERVER_PORT` |Port for the embedded server. Default: `8080`
|`KAFKA_ADMIN-CLIENT-TIMEOUT` | Kafka API timeout in ms. Default: `30000`
|`KAFKA_CLUSTERS_0_NAME` | Cluster name
|`KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS` |Address where to connect
|`KAFKA_CLUSTERS_0_KSQLDBSERVER` | KSQL DB server address
|`KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL` |Security protocol to connect to the brokers. For SSL connection use "SSL", for plaintext connection don't set this environment variable
|`KAFKA_CLUSTERS_0_SCHEMAREGISTRY` |SchemaRegistry's address
|`KAFKA_CLUSTERS_0_SCHEMAREGISTRYAUTH_USERNAME` |SchemaRegistry's basic authentication username
|`KAFKA_CLUSTERS_0_SCHEMAREGISTRYAUTH_PASSWORD` |SchemaRegistry's basic authentication password
|`KAFKA_CLUSTERS_0_SCHEMANAMETEMPLATE` |How keys are saved to schemaRegistry
|`KAFKA_CLUSTERS_0_JMXPORT` |Open JMX port of a broker
|`KAFKA_CLUSTERS_0_READONLY` |Enable read-only mode. Default: false
|`KAFKA_CLUSTERS_0_DISABLELOGDIRSCOLLECTION` |Disable collecting segments information. It should be true for confluent cloud. Default: false
|`KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME` |Given name for the Kafka Connect cluster
|`KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS` |Address of the Kafka Connect service endpoint
|`KAFKA_CLUSTERS_0_KAFKACONNECT_0_USERNAME`| Kafka Connect cluster's basic authentication username
|`KAFKA_CLUSTERS_0_KAFKACONNECT_0_PASSWORD`| Kafka Connect cluster's basic authentication password
|`KAFKA_CLUSTERS_0_JMXSSL` |Enable SSL for JMX? `true` or `false`. For advanced setup, see `kafka-ui-jmx-secured.yml`
|`KAFKA_CLUSTERS_0_JMXUSERNAME` |Username for JMX authentication
|`KAFKA_CLUSTERS_0_JMXPASSWORD` |Password for JMX authentication
|`TOPIC_RECREATE_DELAY_SECONDS` |Time delay between topic deletion and topic creation attempts for topic recreate functionality. Default: 1
|`TOPIC_RECREATE_MAXRETRIES` |Number of attempts of topic creation after topic deletion for topic recreate functionality. Default: 15
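
Put together, a run with a couple of these variables might look like this (a sketch; addresses are placeholders):

```sh
docker run -p 8080:8080 \
  -e KAFKA_CLUSTERS_0_NAME=local \
  -e KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=kafka:9092 \
  -e KAFKA_CLUSTERS_0_READONLY=true \
  -e KAFKA_CLUSTERS_1_NAME=secondLocal \
  -e KAFKA_CLUSTERS_1_BOOTSTRAPSERVERS=kafka1:9092 \
  -d provectuslabs/kafka-ui:latest
```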

View file

@ -6,12 +6,8 @@ Following versions of the project are currently being supported with security up
| Version | Supported |
| ------- | ------------------ |
| 0.7.x | :white_check_mark: |
| 0.6.x | :x: |
| 0.5.x | :x: |
| 0.4.x | :x: |
| 0.3.x | :x: |
| 0.2.x | :x: |
| 0.3.x | :white_check_mark: |
| 0.2.x | :x: |
| 0.1.x | :x: |
## Reporting a Vulnerability

View file

@ -0,0 +1,25 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
example/
README.md

View file

@ -0,0 +1,7 @@
apiVersion: v2
name: kafka-ui
description: A Helm chart for kafka-UI
type: application
version: 0.0.3
appVersion: latest
icon: https://github.com/provectus/kafka-ui/raw/master/images/kafka-ui-logo.png

31
charts/kafka-ui/README.md Normal file
View file

@ -0,0 +1,31 @@
# Kafka-UI Helm Chart
## Configuration
Most of the Helm chart's parameters are common; the following table describes the unique parameters related to application configuration.
### Kafka-UI parameters
| Parameter| Description| Default|
|---|---|---|
| `existingConfigMap`| Name of the existing ConfigMap with Kafka-UI environment variables | `nil`|
| `existingSecret`| Name of the existing Secret with Kafka-UI environment variables| `nil`|
| `envs.secret`| Set of the sensitive environment variables to pass to Kafka-UI | `{}`|
| `envs.config`| Set of the environment variables to pass to Kafka-UI | `{}`|
| `networkPolicy.enabled` | Enable network policies | `false`|
| `networkPolicy.egressRules.customRules` | Custom network egress policy rules | `[]`|
| `networkPolicy.ingressRules.customRules` | Custom network ingress policy rules | `[]`|
| `podLabels` | Extra labels for Kafka-UI pod | `{}`|
## Example
To install Kafka-UI, execute the following:
``` bash
helm repo add kafka-ui https://provectus.github.io/kafka-ui
helm install kafka-ui kafka-ui/kafka-ui --set envs.config.KAFKA_CLUSTERS_0_NAME=local --set envs.config.KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=kafka:9092
```
To connect to the Kafka-UI web application, execute:
``` bash
kubectl port-forward svc/kafka-ui 8080:80
```
Open `http://127.0.0.1:8080` in the browser to access Kafka-UI.
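
Values can also be supplied from a file instead of repeated `--set` flags; a minimal `values.yaml` sketch using the parameters above:

```yaml
envs:
  config:
    KAFKA_CLUSTERS_0_NAME: local
    KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:9092
```

Then install with `helm install kafka-ui kafka-ui/kafka-ui -f values.yaml`.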

View file

@ -0,0 +1,3 @@
apiVersion: v1
entries: {}
generated: "2021-11-11T12:26:08.479581+03:00"

View file

@ -0,0 +1,21 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "kafka-ui.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "kafka-ui.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "kafka-ui.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "kafka-ui.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:8080
{{- end }}

View file

@ -0,0 +1,63 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "kafka-ui.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "kafka-ui.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "kafka-ui.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "kafka-ui.labels" -}}
helm.sh/chart: {{ include "kafka-ui.chart" . }}
{{ include "kafka-ui.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "kafka-ui.selectorLabels" -}}
app.kubernetes.io/name: {{ include "kafka-ui.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "kafka-ui.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "kafka-ui.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}

View file

@ -0,0 +1,8 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "kafka-ui.fullname" . }}
labels:
{{- include "kafka-ui.labels" . | nindent 4 }}
data:
{{- toYaml .Values.envs.config | nindent 2 }}

View file

@ -0,0 +1,103 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "kafka-ui.fullname" . }}
labels:
{{- include "kafka-ui.labels" . | nindent 4 }}
spec:
{{- if not .Values.autoscaling.enabled }}
replicas: {{ .Values.replicaCount }}
{{- end }}
selector:
matchLabels:
{{- include "kafka-ui.selectorLabels" . | nindent 6 }}
template:
metadata:
annotations:
{{- with .Values.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}
labels:
{{- include "kafka-ui.selectorLabels" . | nindent 8 }}
{{- if .Values.podLabels }}
{{- toYaml .Values.podLabels | nindent 8 }}
{{- end }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.initContainers }}
initContainers:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "kafka-ui.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
{{- with .Values.env }}
env:
{{- toYaml . | nindent 12 }}
{{- end }}
envFrom:
{{- if .Values.existingConfigMap }}
- configMapRef:
name: {{ .Values.existingConfigMap }}
{{- end }}
- configMapRef:
name: {{ include "kafka-ui.fullname" . }}
{{- if .Values.existingSecret }}
- secretRef:
name: {{ .Values.existingSecret }}
{{- end }}
- secretRef:
name: {{ include "kafka-ui.fullname" . }}
ports:
- name: http
containerPort: 8080
protocol: TCP
livenessProbe:
httpGet:
{{- $contextPath := .Values.envs.config.SERVER_SERVLET_CONTEXT_PATH | default "" | printf "%s/actuator/health" | urlParse }}
path: {{ get $contextPath "path" }}
port: http
initialDelaySeconds: 60
periodSeconds: 30
timeoutSeconds: 10
readinessProbe:
httpGet:
{{- $contextPath := .Values.envs.config.SERVER_SERVLET_CONTEXT_PATH | default "" | printf "%s/actuator/health" | urlParse }}
path: {{ get $contextPath "path" }}
port: http
initialDelaySeconds: 60
periodSeconds: 30
timeoutSeconds: 10
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.volumeMounts }}
volumeMounts:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.volumes }}
volumes:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
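
Note how both probes build their path from `SERVER_SERVLET_CONTEXT_PATH`, so the health endpoint follows the app's base path automatically. For example, with the values below (a sketch) the rendered probes target `/kafka-ui/actuator/health`:

```yaml
envs:
  config:
    SERVER_SERVLET_CONTEXT_PATH: "/kafka-ui"
```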

View file

@ -0,0 +1,28 @@
{{- if .Values.autoscaling.enabled }}
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "kafka-ui.fullname" . }}
labels:
{{- include "kafka-ui.labels" . | nindent 4 }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "kafka-ui.fullname" . }}
minReplicas: {{ .Values.autoscaling.minReplicas }}
maxReplicas: {{ .Values.autoscaling.maxReplicas }}
metrics:
{{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
- type: Resource
resource:
name: cpu
targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
{{- end }}
{{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
- type: Resource
resource:
name: memory
targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
{{- end }}
{{- end }}

View file

@ -0,0 +1,87 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "kafka-ui.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1" }}
apiVersion: networking.k8s.io/v1
{{- else if $.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }}
apiVersion: networking.k8s.io/v1beta1
{{- else }}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
{{- include "kafka-ui.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls.enabled }}
tls:
- hosts:
- {{ tpl .Values.ingress.host . }}
secretName: {{ .Values.ingress.tls.secretName }}
{{- end }}
{{- if .Values.ingress.ingressClassName }}
ingressClassName: {{ .Values.ingress.ingressClassName }}
{{- end }}
rules:
- http:
paths:
{{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1" -}}
{{- range .Values.ingress.precedingPaths }}
- path: {{ .path }}
pathType: Prefix
backend:
service:
name: {{ .serviceName }}
port:
number: {{ .servicePort }}
{{- end }}
- backend:
service:
name: {{ $fullName }}
port:
number: {{ $svcPort }}
pathType: Prefix
{{- if .Values.ingress.path }}
path: {{ .Values.ingress.path }}
{{- end }}
{{- range .Values.ingress.succeedingPaths }}
- path: {{ .path }}
pathType: Prefix
backend:
service:
name: {{ .serviceName }}
port:
number: {{ .servicePort }}
{{- end }}
{{- if tpl .Values.ingress.host . }}
host: {{tpl .Values.ingress.host . }}
{{- end }}
{{- else -}}
{{- range .Values.ingress.precedingPaths }}
- path: {{ .path }}
backend:
serviceName: {{ .serviceName }}
servicePort: {{ .servicePort }}
{{- end }}
- backend:
serviceName: {{ $fullName }}
servicePort: {{ $svcPort }}
{{- if .Values.ingress.path }}
path: {{ .Values.ingress.path }}
{{- end }}
{{- range .Values.ingress.succeedingPaths }}
- path: {{ .path }}
backend:
serviceName: {{ .serviceName }}
servicePort: {{ .servicePort }}
{{- end }}
{{- if tpl .Values.ingress.host . }}
host: {{ tpl .Values.ingress.host . }}
{{- end }}
{{- end }}
{{- end }}
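
A values sketch that exercises this template, enabling the Ingress with TLS and a custom host (names are placeholders):

```yaml
ingress:
  enabled: true
  host: kafka-ui.example.com
  path: /
  tls:
    enabled: true
    secretName: kafka-ui-tls
```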

View file

@ -0,0 +1,18 @@
{{- if and .Values.networkPolicy.enabled .Values.networkPolicy.egressRules.customRules }}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: {{ printf "%s-egress" (include "kafka-ui.fullname" .) }}
labels:
{{- include "kafka-ui.labels" . | nindent 4 }}
spec:
podSelector:
matchLabels:
{{- include "kafka-ui.selectorLabels" . | nindent 6 }}
policyTypes:
- Egress
egress:
{{- if .Values.networkPolicy.egressRules.customRules }}
{{- toYaml .Values.networkPolicy.egressRules.customRules | nindent 4 }}
{{- end }}
{{- end }}

View file

@ -0,0 +1,18 @@
{{- if and .Values.networkPolicy.enabled .Values.networkPolicy.ingressRules.customRules }}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: {{ printf "%s-ingress" (include "kafka-ui.fullname" .) }}
labels:
{{- include "kafka-ui.labels" . | nindent 4 }}
spec:
podSelector:
matchLabels:
{{- include "kafka-ui.selectorLabels" . | nindent 6 }}
policyTypes:
- Ingress
ingress:
{{- if .Values.networkPolicy.ingressRules.customRules }}
{{- toYaml .Values.networkPolicy.ingressRules.customRules | nindent 4 }}
{{- end }}
{{- end }}

View file

@ -0,0 +1,9 @@
apiVersion: v1
kind: Secret
metadata:
name: {{ include "kafka-ui.fullname" . }}
labels:
{{- include "kafka-ui.labels" . | nindent 4 }}
type: Opaque
data:
{{- toYaml .Values.envs.secret | nindent 2 }}

View file

@ -0,0 +1,22 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "kafka-ui.fullname" . }}
labels:
{{- include "kafka-ui.labels" . | nindent 4 }}
{{- if .Values.service.annotations }}
annotations:
{{ toYaml .Values.service.annotations | nindent 4 }}
{{- end }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
{{- if (and (eq .Values.service.type "NodePort") .Values.service.nodePort) }}
nodePort: {{ .Values.service.nodePort }}
{{- end }}
selector:
{{- include "kafka-ui.selectorLabels" . | nindent 4 }}

View file

@ -0,0 +1,12 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "kafka-ui.serviceAccountName" . }}
labels:
{{- include "kafka-ui.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}

127
charts/kafka-ui/values.yaml Normal file
View file

@ -0,0 +1,127 @@
replicaCount: 1
image:
registry: docker.io
repository: provectuslabs/kafka-ui
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: ""
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
# Specifies whether a service account should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
existingConfigMap: ""
existingSecret: ""
envs:
secret: {}
config: {}
networkPolicy:
enabled: false
egressRules:
## Additional custom egress rules
## e.g:
## customRules:
## - to:
## - namespaceSelector:
## matchLabels:
## label: example
customRules: []
ingressRules:
## Additional custom ingress rules
## e.g:
## customRules:
## - from:
## - namespaceSelector:
## matchLabels:
## label: example
customRules: []
podAnnotations: {}
podLabels: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 80
# if you want to force a specific nodePort. Must be used with service.type=NodePort
# nodePort:
# Ingress configuration
ingress:
# Enable ingress resource
enabled: false
# Annotations for the Ingress
annotations: {}
# ingressClassName for the Ingress
ingressClassName: ""
# The path for the Ingress
path: ""
# The hostname for the Ingress
host: ""
# configs for Ingress TLS
tls:
# Enable TLS termination for the Ingress
enabled: false
# the name of a pre-created Secret containing a TLS private key and certificate
secretName: ""
# HTTP paths to add to the Ingress before the default path
precedingPaths: []
# HTTP paths to add to the Ingress after the default path
succeedingPaths: []
resources: {}
# limits:
# cpu: 200m
# memory: 512Mi
# requests:
# cpu: 200m
# memory: 256Mi
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
nodeSelector: {}
tolerations: []
affinity: {}
env: {}
initContainers: {}
volumeMounts: {}
volumes: {}

45
docker-compose.md Normal file
View file

@ -0,0 +1,45 @@
# Quick Start with docker-compose
Environment variables documentation - [see usage](README.md#env_variables).<br/>
We have plenty of example files with more complex configurations. Please check them out in the `docker` directory.
* Add a new service in docker-compose.yml
```yaml
version: '2'
services:
kafka-ui:
image: provectuslabs/kafka-ui
container_name: kafka-ui
ports:
- "8080:8080"
restart: always
environment:
- KAFKA_CLUSTERS_0_NAME=local
- KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=kafka:9092
- KAFKA_CLUSTERS_0_ZOOKEEPER=localhost:2181
```
* If you prefer UI for Apache Kafka in read-only mode
```yaml
version: '2'
services:
kafka-ui:
image: provectuslabs/kafka-ui
container_name: kafka-ui
ports:
- "8080:8080"
restart: always
environment:
- KAFKA_CLUSTERS_0_NAME=local
- KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=kafka:9092
- KAFKA_CLUSTERS_0_ZOOKEEPER=localhost:2181
- KAFKA_CLUSTERS_0_READONLY=true
```
* Start the UI for Apache Kafka process
```bash
docker-compose up -d kafka-ui
```
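
Once the container is up, readiness can be confirmed against the health endpoint mentioned in the README (a sketch):

```bash
curl http://localhost:8080/actuator/health
```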

View file

@ -1,16 +1,12 @@
# Descriptions of docker-compose configurations (*.yaml)
1. [kafka-ui.yaml](./kafka-ui.yaml) - Default configuration with 2 kafka clusters with two nodes of Schema Registry, one kafka-connect and a few dummy topics.
2. [kafka-ui-arm64.yaml](./kafka-ui-arm64.yaml) - Default configuration for ARM64 (Mac M1) architecture with 1 kafka cluster without zookeeper with one node of Schema Registry, one kafka-connect and a few dummy topics.
3. [kafka-clusters-only.yaml](./kafka-clusters-only.yaml) - A configuration for development purposes, everything besides `kafka-ui` itself (to be run locally).
4. [kafka-ui-ssl.yml](./kafka-ssl.yml) - Connect to Kafka via TLS/SSL
5. [kafka-cluster-sr-auth.yaml](./kafka-cluster-sr-auth.yaml) - Schema registry with authentication.
6. [kafka-ui-auth-context.yaml](./kafka-ui-auth-context.yaml) - Basic (username/password) authentication with custom path (URL) (issue 861).
7. [e2e-tests.yaml](./e2e-tests.yaml) - Configuration with different connectors (github-source, s3, sink-activities, source-activities) and Ksql functionality.
8. [kafka-ui-jmx-secured.yml](./kafka-ui-jmx-secured.yml) - Kafka's JMX with SSL and authentication.
9. [kafka-ui-reverse-proxy.yaml](./nginx-proxy.yaml) - An example for using the app behind a proxy (like nginx).
10. [kafka-ui-sasl.yaml](./kafka-ui-sasl.yaml) - SASL auth for Kafka.
11. [kafka-ui-traefik-proxy.yaml](./traefik-proxy.yaml) - Traefik specific proxy configuration.
12. [oauth-cognito.yaml](./oauth-cognito.yaml) - OAuth2 with Cognito
13. [kafka-ui-with-jmx-exporter.yaml](./kafka-ui-with-jmx-exporter.yaml) - A configuration with 2 kafka clusters with enabled prometheus jmx exporters instead of jmx.
14. [kafka-with-zookeeper.yaml](./kafka-with-zookeeper.yaml) - An example for using kafka with zookeeper
2. [kafka-clusters-only.yaml](./kafka-clusters-only.yaml) - A configuration for development purposes, everything besides `kafka-ui` itself (to be run locally).
3. [kafka-ui-ssl.yml](./kafka-ssl.yml) - Connect to Kafka via TLS/SSL
4. [kafka-cluster-sr-auth.yaml](./kafka-cluster-sr-auth.yaml) - Schema registry with authentication.
5. [kafka-ui-auth-context.yaml](./kafka-ui-auth-context.yaml) - Basic (username/password) authentication with custom path (URL) (issue 861).
6. [kafka-ui-connectors.yaml](./kafka-ui-connectors.yaml) - Configuration with different connectors (github-source, s3, sink-activities, source-activities) and Ksql functionality.
7. [kafka-ui-jmx-secured.yml](./kafka-ui-jmx-secured.yml) - Kafka's JMX with SSL and authentication.
8. [kafka-ui-reverse-proxy.yaml](./kafka-ui-reverse-proxy.yaml) - An example for using the app behind a proxy (like nginx).
9. [kafka-ui-sasl.yaml](./kafka-ui-sasl.yaml) - SASL auth for Kafka.
10. [kafka-ui-traefik-proxy.yaml](./kafka-ui-traefik-proxy.yaml) - Traefik specific proxy configuration.

View file

@ -0,0 +1,86 @@
---
version: '2'
services:
kafka-ui:
container_name: kafka-ui
image: provectuslabs/kafka-ui:latest
ports:
- 8080:8080
depends_on:
- zookeeper0
- kafka0
- schemaregistry0
environment:
KAFKA_CLUSTERS_0_NAME: local
KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
KAFKA_CLUSTERS_0_ZOOKEEPER: zookeeper0:2181
KAFKA_CLUSTERS_0_JMXPORT: 9997
KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry0:8085
KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first
KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
KAFKA_CLUSTERS_1_NAME: secondLocal
KAFKA_CLUSTERS_1_BOOTSTRAPSERVERS: kafka1:29092
KAFKA_CLUSTERS_1_ZOOKEEPER: zookeeper1:2181
KAFKA_CLUSTERS_1_JMXPORT: 9998
KAFKA_CLUSTERS_1_SCHEMAREGISTRY: http://schemaregistry1:8085
KAFKA_CLUSTERS_1_KAFKACONNECT_0_NAME: first
KAFKA_CLUSTERS_1_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
AUTH_TYPE: "LDAP"
SPRING_LDAP_URLS: "ldap://ldap:10389"
SPRING_LDAP_DN_PATTERN: "cn={0},ou=people,dc=planetexpress,dc=com"
# USER SEARCH FILTER INSTEAD OF DN
# SPRING_LDAP_USERFILTER_SEARCHBASE: "dc=planetexpress,dc=com"
# SPRING_LDAP_USERFILTER_SEARCHFILTER: "(&(uid={0})(objectClass=inetOrgPerson))"
# LDAP ADMIN USER
# SPRING_LDAP_ADMINUSER: "cn=admin,dc=planetexpress,dc=com"
# SPRING_LDAP_ADMINPASSWORD: "GoodNewsEveryone"
ldap:
image: rroemhild/test-openldap:latest
hostname: "ldap"
zookeeper0:
image: confluentinc/cp-zookeeper:5.2.4
environment:
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_TICK_TIME: 2000
ports:
- 2181:2181
kafka0:
image: confluentinc/cp-kafka:5.3.1
depends_on:
- zookeeper0
ports:
- 9092:9092
- 9997:9997
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
JMX_PORT: 9997
KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9997
schemaregistry0:
image: confluentinc/cp-schema-registry:5.5.0
ports:
- 8085:8085
depends_on:
- zookeeper0
- kafka0
environment:
SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper0:2181
SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
SCHEMA_REGISTRY_HOST_NAME: schemaregistry0
SCHEMA_REGISTRY_LISTENERS: http://schemaregistry0:8085
SCHEMA_REGISTRY_SCHEMA_REGISTRY_INTER_INSTANCE_PROTOCOL: "http"
SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: INFO
SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas

View file

@ -1,190 +0,0 @@
---
version: '3.5'
services:
kafka-ui:
container_name: kafka-ui
image: provectuslabs/kafka-ui:latest
ports:
- 8080:8080
healthcheck:
test: wget --no-verbose --tries=1 --spider http://localhost:8080/actuator/health
interval: 30s
timeout: 10s
retries: 10
depends_on:
kafka0:
condition: service_healthy
schemaregistry0:
condition: service_healthy
kafka-connect0:
condition: service_healthy
environment:
KAFKA_CLUSTERS_0_NAME: local
KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
KAFKA_CLUSTERS_0_METRICS_PORT: 9997
KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry0:8085
KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first
KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
KAFKA_CLUSTERS_0_KSQLDBSERVER: http://ksqldb:8088
kafka0:
image: confluentinc/cp-kafka:7.2.1
hostname: kafka0
container_name: kafka0
healthcheck:
test: unset JMX_PORT && KAFKA_JMX_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9999" && kafka-broker-api-versions --bootstrap-server=localhost:9092
interval: 30s
timeout: 10s
retries: 10
ports:
- "9092:9092"
- "9997:9997"
environment:
KAFKA_BROKER_ID: 1
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT'
KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092'
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
KAFKA_JMX_PORT: 9997
KAFKA_JMX_HOSTNAME: localhost
KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9997
KAFKA_PROCESS_ROLES: 'broker,controller'
KAFKA_NODE_ID: 1
KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka0:29093'
KAFKA_LISTENERS: 'PLAINTEXT://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092'
KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT'
KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
volumes:
- ./scripts/update_run.sh:/tmp/update_run.sh
command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"
schemaregistry0:
image: confluentinc/cp-schema-registry:7.2.1
ports:
- 8085:8085
depends_on:
kafka0:
condition: service_healthy
healthcheck:
test: [ "CMD", "timeout", "1", "curl", "--silent", "--fail", "http://schemaregistry0:8085/subjects" ]
interval: 30s
timeout: 10s
retries: 10
environment:
SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
SCHEMA_REGISTRY_HOST_NAME: schemaregistry0
SCHEMA_REGISTRY_LISTENERS: http://schemaregistry0:8085
SCHEMA_REGISTRY_SCHEMA_REGISTRY_INTER_INSTANCE_PROTOCOL: "http"
SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: INFO
SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
kafka-connect0:
build:
context: ./kafka-connect
args:
image: confluentinc/cp-kafka-connect:6.0.1
ports:
- 8083:8083
depends_on:
kafka0:
condition: service_healthy
schemaregistry0:
condition: service_healthy
healthcheck:
test: [ "CMD", "nc", "127.0.0.1", "8083" ]
interval: 30s
timeout: 10s
retries: 10
environment:
CONNECT_BOOTSTRAP_SERVERS: kafka0:29092
CONNECT_GROUP_ID: compose-connect-group
CONNECT_CONFIG_STORAGE_TOPIC: _connect_configs
CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: 1
CONNECT_OFFSET_STORAGE_TOPIC: _connect_offset
CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: 1
CONNECT_STATUS_STORAGE_TOPIC: _connect_status
CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: 1
CONNECT_KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter
CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: http://schemaregistry0:8085
CONNECT_VALUE_CONVERTER: org.apache.kafka.connect.storage.StringConverter
CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: http://schemaregistry0:8085
CONNECT_INTERNAL_KEY_CONVERTER: org.apache.kafka.connect.json.JsonConverter
CONNECT_INTERNAL_VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter
CONNECT_REST_ADVERTISED_HOST_NAME: kafka-connect0
CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components"
# AWS_ACCESS_KEY_ID: ""
# AWS_SECRET_ACCESS_KEY: ""
kafka-init-topics:
image: confluentinc/cp-kafka:7.2.1
volumes:
- ./data/message.json:/data/message.json
depends_on:
kafka0:
condition: service_healthy
command: "bash -c 'echo Waiting for Kafka to be ready... && \
cub kafka-ready -b kafka0:29092 1 30 && \
kafka-topics --create --topic users --partitions 3 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
kafka-topics --create --topic messages --partitions 2 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
kafka-console-producer --bootstrap-server kafka0:29092 --topic users < /data/message.json'"
postgres-db:
build:
context: ./postgres
args:
image: postgres:9.6.22
ports:
- 5432:5432
healthcheck:
test: [ "CMD-SHELL", "pg_isready -U dev_user" ]
interval: 10s
timeout: 5s
retries: 5
environment:
POSTGRES_USER: 'dev_user'
POSTGRES_PASSWORD: '12345'
create-connectors:
image: ellerbrock/alpine-bash-curl-ssl
depends_on:
postgres-db:
condition: service_healthy
kafka-connect0:
condition: service_healthy
volumes:
- ./connectors:/connectors
command: bash -c '/connectors/start.sh'
ksqldb:
image: confluentinc/ksqldb-server:0.18.0
healthcheck:
test: [ "CMD", "timeout", "1", "curl", "--silent", "--fail", "http://localhost:8088/info" ]
interval: 30s
timeout: 10s
retries: 10
depends_on:
kafka0:
condition: service_healthy
kafka-connect0:
condition: service_healthy
schemaregistry0:
condition: service_healthy
ports:
- 8088:8088
environment:
KSQL_CUB_KAFKA_TIMEOUT: 120
KSQL_LISTENERS: http://0.0.0.0:8088
KSQL_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
KSQL_KSQL_LOGGING_PROCESSING_STREAM_AUTO_CREATE: "true"
KSQL_KSQL_LOGGING_PROCESSING_TOPIC_AUTO_CREATE: "true"
KSQL_KSQL_CONNECT_URL: http://kafka-connect0:8083
KSQL_KSQL_SCHEMA_REGISTRY_URL: http://schemaregistry0:8085
KSQL_KSQL_SERVICE_ID: my_ksql_1
KSQL_KSQL_HIDDEN_TOPICS: '^_.*'
KSQL_CACHE_MAX_BYTES_BUFFERING: 0
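Every service in this file gates its dependents on a healthcheck, so startup failures are easiest to diagnose from the recorded health state; a sketch:

# show per-service health, then the latest probe result for one container
docker compose ps
docker inspect --format '{{.State.Health.Status}}' kafka0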

documentation/compose/jaas/client.properties  Executable file → Normal file (0 changes)

documentation/compose/jaas/kafka_connect.jaas  Executable file → Normal file (0 changes)

documentation/compose/jaas/kafka_connect.password  Executable file → Normal file (0 changes)

@@ -11,8 +11,4 @@ KafkaClient {
user_admin="admin-secret";
};
Client {
org.apache.zookeeper.server.auth.DigestLoginModule required
username="zkuser"
password="zkuserpassword";
};
Client {};

documentation/compose/jaas/schema_registry.jaas  Executable file → Normal file (0 changes)

documentation/compose/jaas/schema_registry.password  Executable file → Normal file (0 changes)

@@ -1,4 +0,0 @@
Server {
org.apache.zookeeper.server.auth.DigestLoginModule required
user_zkuser="zkuserpassword";
};

@@ -1,2 +0,0 @@
rules:
- pattern: ".*"

@@ -1,10 +0,0 @@
#!/usr/bin/env bash
JAVA_AGENT_FILE="/usr/share/jmx_exporter/jmx_prometheus_javaagent.jar"
if [ ! -f "$JAVA_AGENT_FILE" ]
then
echo "Downloading jmx_exporter javaagent"
curl -o $JAVA_AGENT_FILE https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/0.16.1/jmx_prometheus_javaagent-0.16.1.jar
fi
exec /etc/confluent/docker/run
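The agent this script downloads only takes effect once a JVM loads it; a sketch of how it would be attached to a broker, pairing it with the catch-all rules file above (the port and rules path are assumptions):

# sketch: expose JMX metrics to Prometheus via the downloaded javaagent
export KAFKA_OPTS="-javaagent:/usr/share/jmx_exporter/jmx_prometheus_javaagent.jar=11001:/usr/share/jmx_exporter/rules.yml"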

@@ -2,44 +2,43 @@
version: '2'
services:
kafka1:
image: confluentinc/cp-kafka:7.2.1
hostname: kafka1
container_name: kafka1
zookeeper1:
image: confluentinc/cp-zookeeper:5.2.4
environment:
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_TICK_TIME: 2000
ports:
- "9092:9092"
- "9997:9997"
- 2182:2181
kafka1:
image: confluentinc/cp-kafka:5.3.1
depends_on:
- zookeeper1
environment:
KAFKA_BROKER_ID: 1
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT'
KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka1:29092,PLAINTEXT_HOST://localhost:9092'
KAFKA_ZOOKEEPER_CONNECT: zookeeper1:2181
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka1:29092,PLAINTEXT_HOST://localhost:9093
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
KAFKA_JMX_PORT: 9997
KAFKA_JMX_HOSTNAME: localhost
KAFKA_PROCESS_ROLES: 'broker,controller'
KAFKA_NODE_ID: 1
KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka1:29093'
KAFKA_LISTENERS: 'PLAINTEXT://kafka1:29092,CONTROLLER://kafka1:29093,PLAINTEXT_HOST://0.0.0.0:9092'
KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT'
KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
volumes:
- ./scripts/update_run.sh:/tmp/update_run.sh
command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"
JMX_PORT: 9998
KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=localhost -Dcom.sun.management.jmxremote.rmi.port=9998
ports:
- 9093:9093
- 9998:9998
schemaregistry1:
image: confluentinc/cp-schema-registry:7.2.1
image: confluentinc/cp-schema-registry:5.5.0
ports:
- 18085:8085
depends_on:
- zookeeper1
- kafka1
volumes:
- ./jaas:/conf
environment:
SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka1:29092
SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper1:2181
SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
SCHEMA_REGISTRY_HOST_NAME: schemaregistry1
SCHEMA_REGISTRY_LISTENERS: http://schemaregistry1:8085
@@ -55,29 +54,13 @@ services:
SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
kafka-init-topics:
image: confluentinc/cp-kafka:7.2.1
image: confluentinc/cp-kafka:5.3.1
volumes:
- ./data/message.json:/data/message.json
- ./message.json:/data/message.json
depends_on:
- kafka1
command: "bash -c 'echo Waiting for Kafka to be ready... && \
cub kafka-ready -b kafka1:29092 1 30 && \
kafka-topics --create --topic users --partitions 3 --replication-factor 1 --if-not-exists --bootstrap-server kafka1:29092 && \
kafka-topics --create --topic messages --partitions 2 --replication-factor 1 --if-not-exists --bootstrap-server kafka1:29092 && \
kafka-console-producer --bootstrap-server kafka1:29092 --topic users < /data/message.json'"
kafka-ui:
container_name: kafka-ui
image: provectuslabs/kafka-ui:latest
ports:
- 8080:8080
depends_on:
- kafka1
- schemaregistry1
environment:
KAFKA_CLUSTERS_0_NAME: local
KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka1:29092
KAFKA_CLUSTERS_0_METRICS_PORT: 9997
KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry1:8085
KAFKA_CLUSTERS_0_SCHEMAREGISTRYAUTH_USERNAME: admin
KAFKA_CLUSTERS_0_SCHEMAREGISTRYAUTH_PASSWORD: letmein
kafka-topics --create --topic second.users --partitions 3 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
kafka-topics --create --topic second.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
kafka-console-producer --broker-list kafka1:29092 --topic second.users < /data/message.json'"

@@ -0,0 +1,146 @@
---
version: '2'
services:
zookeeper0:
image: confluentinc/cp-zookeeper:5.2.4
environment:
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_TICK_TIME: 2000
ports:
- 2181:2181
kafka0:
image: confluentinc/cp-kafka:5.3.1
depends_on:
- zookeeper0
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 2
JMX_PORT: 9997
KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=localhost -Dcom.sun.management.jmxremote.rmi.port=9997
ports:
- 9092:9092
- 9997:9997
kafka01:
image: confluentinc/cp-kafka:5.3.1
depends_on:
- zookeeper0
environment:
KAFKA_BROKER_ID: 2
KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka01:29092,PLAINTEXT_HOST://localhost:9094
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT,PLAIN:PLAINTEXT
KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 2
JMX_PORT: 9999
KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=localhost -Dcom.sun.management.jmxremote.rmi.port=9999
ports:
- 9094:9094
- 9999:9999
zookeeper1:
image: confluentinc/cp-zookeeper:5.2.4
environment:
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_TICK_TIME: 2000
ports:
- 2182:2181
kafka1:
image: confluentinc/cp-kafka:5.3.1
depends_on:
- zookeeper1
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: zookeeper1:2181
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka1:29092,PLAINTEXT_HOST://localhost:9093
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
JMX_PORT: 9998
KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=localhost -Dcom.sun.management.jmxremote.rmi.port=9998
ports:
- 9093:9093
- 9998:9998
schemaregistry0:
image: confluentinc/cp-schema-registry:5.5.0
depends_on:
- zookeeper0
- kafka0
- kafka01
environment:
SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092,PLAINTEXT://kafka01:29092
SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper0:2181
SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
SCHEMA_REGISTRY_HOST_NAME: schemaregistry0
SCHEMA_REGISTRY_LISTENERS: http://schemaregistry0:8085
SCHEMA_REGISTRY_SCHEMA_REGISTRY_INTER_INSTANCE_PROTOCOL: "http"
SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: INFO
SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
ports:
- 8085:8085
schemaregistry1:
image: confluentinc/cp-schema-registry:5.5.0
ports:
- 18085:8085
depends_on:
- zookeeper1
- kafka1
environment:
SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka1:29092
SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper1:2181
SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
SCHEMA_REGISTRY_HOST_NAME: schemaregistry1
SCHEMA_REGISTRY_LISTENERS: http://schemaregistry1:8085
SCHEMA_REGISTRY_SCHEMA_REGISTRY_INTER_INSTANCE_PROTOCOL: "http"
SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: INFO
SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
kafka-connect0:
image: confluentinc/cp-kafka-connect:6.0.1
ports:
- 8083:8083
depends_on:
- kafka0
- schemaregistry0
environment:
CONNECT_BOOTSTRAP_SERVERS: kafka0:29092
CONNECT_GROUP_ID: compose-connect-group
CONNECT_CONFIG_STORAGE_TOPIC: _connect_configs
CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: 1
CONNECT_OFFSET_STORAGE_TOPIC: _connect_offset
CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: 1
CONNECT_STATUS_STORAGE_TOPIC: _connect_status
CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: 1
CONNECT_KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter
CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: http://schemaregistry0:8085
CONNECT_VALUE_CONVERTER: org.apache.kafka.connect.storage.StringConverter
CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: http://schemaregistry0:8085
CONNECT_INTERNAL_KEY_CONVERTER: org.apache.kafka.connect.json.JsonConverter
CONNECT_INTERNAL_VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter
CONNECT_REST_ADVERTISED_HOST_NAME: kafka-connect0
CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components"
kafka-init-topics:
image: confluentinc/cp-kafka:5.3.1
volumes:
- ./message.json:/data/message.json
depends_on:
- kafka1
command: "bash -c 'echo Waiting for Kafka to be ready... && \
cub kafka-ready -b kafka1:29092 1 30 && \
kafka-topics --create --topic second.users --partitions 3 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
kafka-topics --create --topic second.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
kafka-topics --create --topic first.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper0:2181 && \
kafka-console-producer --broker-list kafka1:29092 --topic second.users < /data/message.json'"

@@ -1,178 +0,0 @@
---
version: '3.4'
services:
kafka-ui:
container_name: kafka-ui
image: provectuslabs/kafka-ui:latest
ports:
- 8080:8080
depends_on:
- kafka0
- schemaregistry0
- kafka-connect0
- ksqldb0
environment:
KAFKA_CLUSTERS_0_NAME: local
KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL: SSL
KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092 # SSL LISTENER!
KAFKA_CLUSTERS_0_PROPERTIES_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM: '' # DISABLE COMMON NAME VERIFICATION
KAFKA_CLUSTERS_0_SCHEMAREGISTRY: https://schemaregistry0:8085
KAFKA_CLUSTERS_0_SCHEMAREGISTRYSSL_KEYSTORELOCATION: /kafka.keystore.jks
KAFKA_CLUSTERS_0_SCHEMAREGISTRYSSL_KEYSTOREPASSWORD: "secret"
KAFKA_CLUSTERS_0_KSQLDBSERVER: https://ksqldb0:8088
KAFKA_CLUSTERS_0_KSQLDBSERVERSSL_KEYSTORELOCATION: /kafka.keystore.jks
KAFKA_CLUSTERS_0_KSQLDBSERVERSSL_KEYSTOREPASSWORD: "secret"
KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: local
KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: https://kafka-connect0:8083
KAFKA_CLUSTERS_0_KAFKACONNECT_0_KEYSTORELOCATION: /kafka.keystore.jks
KAFKA_CLUSTERS_0_KAFKACONNECT_0_KEYSTOREPASSWORD: "secret"
KAFKA_CLUSTERS_0_SSL_TRUSTSTORELOCATION: /kafka.truststore.jks
KAFKA_CLUSTERS_0_SSL_TRUSTSTOREPASSWORD: "secret"
DYNAMIC_CONFIG_ENABLED: 'true' # not necessary for ssl, added for tests
volumes:
- ./ssl/kafka.truststore.jks:/kafka.truststore.jks
- ./ssl/kafka.keystore.jks:/kafka.keystore.jks
kafka0:
image: confluentinc/cp-kafka:7.2.1
hostname: kafka0
container_name: kafka0
ports:
- "9092:9092"
- "9997:9997"
environment:
KAFKA_BROKER_ID: 1
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,SSL:SSL,PLAINTEXT_HOST:PLAINTEXT'
KAFKA_ADVERTISED_LISTENERS: 'SSL://kafka0:29092,PLAINTEXT_HOST://localhost:9092'
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
KAFKA_JMX_PORT: 9997
KAFKA_JMX_HOSTNAME: localhost
KAFKA_PROCESS_ROLES: 'broker,controller'
KAFKA_NODE_ID: 1
KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka0:29093'
KAFKA_LISTENERS: 'SSL://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092'
KAFKA_INTER_BROKER_LISTENER_NAME: 'SSL'
KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
KAFKA_SECURITY_PROTOCOL: SSL
KAFKA_SSL_ENABLED_MECHANISMS: PLAIN,SSL
KAFKA_SSL_KEYSTORE_FILENAME: kafka.keystore.jks
KAFKA_SSL_KEYSTORE_CREDENTIALS: creds
KAFKA_SSL_KEY_CREDENTIALS: creds
KAFKA_SSL_TRUSTSTORE_FILENAME: kafka.truststore.jks
KAFKA_SSL_TRUSTSTORE_CREDENTIALS: creds
#KAFKA_SSL_CLIENT_AUTH: 'required'
KAFKA_SSL_CLIENT_AUTH: 'requested'
KAFKA_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM: '' # COMMON NAME VERIFICATION IS DISABLED SERVER-SIDE
volumes:
- ./scripts/update_run.sh:/tmp/update_run.sh
- ./ssl/creds:/etc/kafka/secrets/creds
- ./ssl/kafka.truststore.jks:/etc/kafka/secrets/kafka.truststore.jks
- ./ssl/kafka.keystore.jks:/etc/kafka/secrets/kafka.keystore.jks
command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"
schemaregistry0:
image: confluentinc/cp-schema-registry:7.2.1
depends_on:
- kafka0
environment:
SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: SSL://kafka0:29092
SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: SSL
SCHEMA_REGISTRY_KAFKASTORE_SSL_TRUSTSTORE_LOCATION: /kafka.truststore.jks
SCHEMA_REGISTRY_KAFKASTORE_SSL_TRUSTSTORE_PASSWORD: secret
SCHEMA_REGISTRY_KAFKASTORE_SSL_KEYSTORE_LOCATION: /kafka.keystore.jks
SCHEMA_REGISTRY_KAFKASTORE_SSL_KEYSTORE_PASSWORD: secret
SCHEMA_REGISTRY_KAFKASTORE_SSL_KEY_PASSWORD: secret
SCHEMA_REGISTRY_HOST_NAME: schemaregistry0
SCHEMA_REGISTRY_LISTENERS: https://schemaregistry0:8085
SCHEMA_REGISTRY_INTER_INSTANCE_PROTOCOL: https
SCHEMA_REGISTRY_SCHEMA_REGISTRY_INTER_INSTANCE_PROTOCOL: "https"
SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: INFO
SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
SCHEMA_REGISTRY_SSL_CLIENT_AUTHENTICATION: "REQUIRED"
SCHEMA_REGISTRY_SSL_TRUSTSTORE_LOCATION: /kafka.truststore.jks
SCHEMA_REGISTRY_SSL_TRUSTSTORE_PASSWORD: secret
SCHEMA_REGISTRY_SSL_KEYSTORE_LOCATION: /kafka.keystore.jks
SCHEMA_REGISTRY_SSL_KEYSTORE_PASSWORD: secret
SCHEMA_REGISTRY_SSL_KEY_PASSWORD: secret
ports:
- 8085:8085
volumes:
- ./ssl/kafka.truststore.jks:/kafka.truststore.jks
- ./ssl/kafka.keystore.jks:/kafka.keystore.jks
kafka-connect0:
image: confluentinc/cp-kafka-connect:7.2.1
ports:
- 8083:8083
depends_on:
- kafka0
- schemaregistry0
environment:
CONNECT_BOOTSTRAP_SERVERS: kafka0:29092
CONNECT_GROUP_ID: compose-connect-group
CONNECT_CONFIG_STORAGE_TOPIC: _connect_configs
CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: 1
CONNECT_OFFSET_STORAGE_TOPIC: _connect_offset
CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: 1
CONNECT_STATUS_STORAGE_TOPIC: _connect_status
CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: 1
CONNECT_KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter
CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: https://schemaregistry0:8085
CONNECT_VALUE_CONVERTER: org.apache.kafka.connect.storage.StringConverter
CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: https://schemaregistry0:8085
CONNECT_INTERNAL_KEY_CONVERTER: org.apache.kafka.connect.json.JsonConverter
CONNECT_INTERNAL_VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter
CONNECT_REST_ADVERTISED_HOST_NAME: kafka-connect0
CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components"
CONNECT_SECURITY_PROTOCOL: "SSL"
CONNECT_SSL_KEYSTORE_LOCATION: "/kafka.keystore.jks"
CONNECT_SSL_KEY_PASSWORD: "secret"
CONNECT_SSL_KEYSTORE_PASSWORD: "secret"
CONNECT_SSL_TRUSTSTORE_LOCATION: "/kafka.truststore.jks"
CONNECT_SSL_TRUSTSTORE_PASSWORD: "secret"
CONNECT_SSL_CLIENT_AUTH: "requested"
CONNECT_REST_ADVERTISED_LISTENER: "https"
CONNECT_LISTENERS: "https://kafka-connect0:8083"
volumes:
- ./ssl/kafka.truststore.jks:/kafka.truststore.jks
- ./ssl/kafka.keystore.jks:/kafka.keystore.jks
ksqldb0:
image: confluentinc/ksqldb-server:0.18.0
depends_on:
- kafka0
- kafka-connect0
- schemaregistry0
ports:
- 8088:8088
environment:
KSQL_CUB_KAFKA_TIMEOUT: 120
KSQL_LISTENERS: https://0.0.0.0:8088
KSQL_BOOTSTRAP_SERVERS: SSL://kafka0:29092
KSQL_SECURITY_PROTOCOL: SSL
KSQL_SSL_TRUSTSTORE_LOCATION: /kafka.truststore.jks
KSQL_SSL_TRUSTSTORE_PASSWORD: secret
KSQL_SSL_KEYSTORE_LOCATION: /kafka.keystore.jks
KSQL_SSL_KEYSTORE_PASSWORD: secret
KSQL_SSL_KEY_PASSWORD: secret
KSQL_SSL_CLIENT_AUTHENTICATION: REQUIRED
KSQL_KSQL_LOGGING_PROCESSING_STREAM_AUTO_CREATE: "true"
KSQL_KSQL_LOGGING_PROCESSING_TOPIC_AUTO_CREATE: "true"
KSQL_KSQL_CONNECT_URL: https://kafka-connect0:8083
KSQL_KSQL_SCHEMA_REGISTRY_URL: https://schemaregistry0:8085
KSQL_KSQL_SERVICE_ID: my_ksql_1
KSQL_KSQL_HIDDEN_TOPICS: '^_.*'
KSQL_CACHE_MAX_BYTES_BUFFERING: 0
volumes:
- ./ssl/kafka.truststore.jks:/kafka.truststore.jks
- ./ssl/kafka.keystore.jks:/kafka.keystore.jks
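The kafka.keystore.jks / kafka.truststore.jks pair mounted throughout this file can be produced with keytool; a minimal self-signed sketch (alias and CN are assumptions; the 'secret' password mirrors the compose values):

# generate a key pair, export its certificate, and trust it
keytool -genkeypair -alias kafka0 -keyalg RSA -validity 365 \
  -dname "CN=kafka0" -keystore kafka.keystore.jks -storepass secret -keypass secret
keytool -exportcert -alias kafka0 -keystore kafka.keystore.jks -storepass secret -file kafka0.crt
keytool -importcert -alias kafka0 -file kafka0.crt \
  -keystore kafka.truststore.jks -storepass secret -noprompt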

@@ -1,50 +1,47 @@
---
version: '3.4'
services:
kafka-ui:
container_name: kafka-ui
image: provectuslabs/kafka-ui:latest
ports:
- 8080:8080
depends_on:
- kafka
- zookeeper0
- kafka0
environment:
KAFKA_CLUSTERS_0_NAME: local
KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL: SSL
KAFKA_CLUSTERS_0_PROPERTIES_SSL_KEYSTORE_LOCATION: /kafka.keystore.jks
KAFKA_CLUSTERS_0_PROPERTIES_SSL_KEYSTORE_PASSWORD: "secret"
KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:29092 # SSL LISTENER!
KAFKA_CLUSTERS_0_SSL_TRUSTSTORELOCATION: /kafka.truststore.jks
KAFKA_CLUSTERS_0_SSL_TRUSTSTOREPASSWORD: "secret"
KAFKA_CLUSTERS_0_PROPERTIES_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM: '' # DISABLE COMMON NAME VERIFICATION
KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092 # SSL LISTENER!
KAFKA_CLUSTERS_0_ZOOKEEPER: zookeeper0:2181
KAFKA_CLUSTERS_0_PROPERTIES_SSL_TRUSTSTORE_LOCATION: /kafka.truststore.jks
KAFKA_CLUSTERS_0_PROPERTIES_SSL_TRUSTSTORE_PASSWORD: secret
volumes:
- ./ssl/kafka.truststore.jks:/kafka.truststore.jks
- ./ssl/kafka.keystore.jks:/kafka.keystore.jks
kafka:
image: confluentinc/cp-kafka:7.2.1
hostname: kafka
container_name: kafka
zookeeper0:
image: confluentinc/cp-zookeeper:6.0.1
environment:
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_TICK_TIME: 2000
ports:
- "9092:9092"
- "9997:9997"
- 2181:2181
kafka0:
image: confluentinc/cp-kafka:6.0.1
hostname: kafka0
depends_on:
- zookeeper0
ports:
- '9092:9092'
environment:
KAFKA_BROKER_ID: 1
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,SSL:SSL,PLAINTEXT_HOST:PLAINTEXT'
KAFKA_ADVERTISED_LISTENERS: 'SSL://kafka:29092,PLAINTEXT_HOST://localhost:9092'
KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
KAFKA_JMX_PORT: 9997
KAFKA_JMX_HOSTNAME: localhost
KAFKA_PROCESS_ROLES: 'broker,controller'
KAFKA_NODE_ID: 1
KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka:29093'
KAFKA_LISTENERS: 'SSL://kafka:29092,CONTROLLER://kafka:29093,PLAINTEXT_HOST://0.0.0.0:9092'
KAFKA_INTER_BROKER_LISTENER_NAME: 'SSL'
KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
KAFKA_ADVERTISED_LISTENERS: SSL://kafka0:29092,PLAINTEXT_HOST://localhost:9092
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: SSL:SSL,PLAINTEXT_HOST:PLAINTEXT
KAFKA_INTER_BROKER_LISTENER_NAME: SSL
KAFKA_SECURITY_PROTOCOL: SSL
KAFKA_SSL_ENABLED_MECHANISMS: PLAIN,SSL
KAFKA_SSL_KEYSTORE_FILENAME: kafka.keystore.jks
@@ -53,11 +50,9 @@ services:
KAFKA_SSL_TRUSTSTORE_FILENAME: kafka.truststore.jks
KAFKA_SSL_TRUSTSTORE_CREDENTIALS: creds
#KAFKA_SSL_CLIENT_AUTH: 'required'
KAFKA_SSL_CLIENT_AUTH: 'requested'
KAFKA_SSL_CLIENT_AUTH: "requested"
KAFKA_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM: '' # COMMON NAME VERIFICATION IS DISABLED SERVER-SIDE
volumes:
- ./scripts/update_run.sh:/tmp/update_run.sh
- ./ssl/creds:/etc/kafka/secrets/creds
- ./ssl/kafka.truststore.jks:/etc/kafka/secrets/kafka.truststore.jks
- ./ssl/kafka.keystore.jks:/etc/kafka/secrets/kafka.keystore.jks
command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"

@@ -1,59 +0,0 @@
---
version: '2'
services:
kafka-ui:
container_name: kafka-ui
image: provectuslabs/kafka-ui:latest
ports:
- 8080:8080
depends_on:
- zookeeper
- kafka
environment:
KAFKA_CLUSTERS_0_NAME: local
KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:29092
KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL: SASL_PLAINTEXT
KAFKA_CLUSTERS_0_PROPERTIES_SASL_MECHANISM: PLAIN
KAFKA_CLUSTERS_0_PROPERTIES_SASL_JAAS_CONFIG: 'org.apache.kafka.common.security.plain.PlainLoginModule required username="admin" password="admin-secret";'
zookeeper:
image: wurstmeister/zookeeper:3.4.6
environment:
JVMFLAGS: "-Djava.security.auth.login.config=/etc/zookeeper/zookeeper_jaas.conf"
volumes:
- ./jaas/zookeeper_jaas.conf:/etc/zookeeper/zookeeper_jaas.conf
ports:
- 2181:2181
kafka:
image: confluentinc/cp-kafka:7.2.1
hostname: kafka
container_name: kafka
ports:
- "9092:9092"
- "9997:9997"
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,SASL_PLAINTEXT:SASL_PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT'
KAFKA_ADVERTISED_LISTENERS: 'SASL_PLAINTEXT://kafka:29092,PLAINTEXT_HOST://localhost:9092'
KAFKA_OPTS: "-Djava.security.auth.login.config=/etc/kafka/jaas/kafka_server.conf"
KAFKA_AUTHORIZER_CLASS_NAME: "kafka.security.authorizer.AclAuthorizer"
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
KAFKA_JMX_PORT: 9997
KAFKA_JMX_HOSTNAME: localhost
KAFKA_NODE_ID: 1
KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka:29093'
KAFKA_LISTENERS: 'SASL_PLAINTEXT://kafka:29092,CONTROLLER://kafka:29093,PLAINTEXT_HOST://0.0.0.0:9092'
KAFKA_INTER_BROKER_LISTENER_NAME: 'SASL_PLAINTEXT'
KAFKA_SASL_ENABLED_MECHANISMS: 'PLAIN'
KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: 'PLAIN'
KAFKA_SECURITY_PROTOCOL: 'SASL_PLAINTEXT'
KAFKA_SUPER_USERS: 'User:admin'
volumes:
- ./scripts/update_run.sh:/tmp/update_run.sh
- ./jaas:/etc/kafka/jaas
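The mounted ./jaas directory must supply the kafka_server.conf named in KAFKA_OPTS above; a sketch consistent with the admin/admin-secret pair kafka-ui uses (file contents assumed):

cat > jaas/kafka_server.conf <<'EOF'
KafkaServer {
  org.apache.kafka.common.security.plain.PlainLoginModule required
  username="admin"
  password="admin-secret"
  user_admin="admin-secret";
};
EOF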

@@ -1,106 +0,0 @@
# ARM64 supported images for kafka can be found here
# https://hub.docker.com/r/confluentinc/cp-kafka/tags?page=1&name=arm64
---
version: '2'
services:
kafka-ui:
container_name: kafka-ui
image: provectuslabs/kafka-ui:latest
ports:
- 8080:8080
depends_on:
- kafka0
- schema-registry0
- kafka-connect0
environment:
KAFKA_CLUSTERS_0_NAME: local
KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
KAFKA_CLUSTERS_0_METRICS_PORT: 9997
KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schema-registry0:8085
KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first
KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
DYNAMIC_CONFIG_ENABLED: 'true' # not necessary, added for tests
KAFKA_CLUSTERS_0_AUDIT_TOPICAUDITENABLED: 'true'
KAFKA_CLUSTERS_0_AUDIT_CONSOLEAUDITENABLED: 'true'
kafka0:
image: confluentinc/cp-kafka:7.2.1.arm64
hostname: kafka0
container_name: kafka0
ports:
- 9092:9092
- 9997:9997
environment:
KAFKA_BROKER_ID: 1
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,CONTROLLER:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092
KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
KAFKA_PROCESS_ROLES: 'broker,controller'
KAFKA_NODE_ID: 1
KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka0:29093'
KAFKA_LISTENERS: 'PLAINTEXT://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092'
KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
KAFKA_JMX_PORT: 9997
KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9997
volumes:
- ./scripts/update_run.sh:/tmp/update_run.sh
command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"
schema-registry0:
image: confluentinc/cp-schema-registry:7.2.1.arm64
ports:
- 8085:8085
depends_on:
- kafka0
environment:
SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
SCHEMA_REGISTRY_HOST_NAME: schema-registry0
SCHEMA_REGISTRY_LISTENERS: http://schema-registry0:8085
SCHEMA_REGISTRY_SCHEMA_REGISTRY_INTER_INSTANCE_PROTOCOL: "http"
SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: INFO
SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
kafka-connect0:
image: confluentinc/cp-kafka-connect:7.2.1.arm64
ports:
- 8083:8083
depends_on:
- kafka0
- schema-registry0
environment:
CONNECT_BOOTSTRAP_SERVERS: kafka0:29092
CONNECT_GROUP_ID: compose-connect-group
CONNECT_CONFIG_STORAGE_TOPIC: _connect_configs
CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: 1
CONNECT_OFFSET_STORAGE_TOPIC: _connect_offset
CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: 1
CONNECT_STATUS_STORAGE_TOPIC: _connect_status
CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: 1
CONNECT_KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter
CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: http://schema-registry0:8085
CONNECT_VALUE_CONVERTER: org.apache.kafka.connect.storage.StringConverter
CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: http://schema-registry0:8085
CONNECT_INTERNAL_KEY_CONVERTER: org.apache.kafka.connect.json.JsonConverter
CONNECT_INTERNAL_VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter
CONNECT_REST_ADVERTISED_HOST_NAME: kafka-connect0
CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components"
kafka-init-topics:
image: confluentinc/cp-kafka:7.2.1.arm64
volumes:
- ./data/message.json:/data/message.json
depends_on:
- kafka0
command: "bash -c 'echo Waiting for Kafka to be ready... && \
cub kafka-ready -b kafka0:29092 1 30 && \
kafka-topics --create --topic second.users --partitions 3 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
kafka-topics --create --topic second.messages --partitions 2 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
kafka-topics --create --topic first.messages --partitions 2 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
kafka-console-producer --bootstrap-server kafka0:29092 --topic second.users < /data/message.json'"
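These .arm64 image tags run natively on Apple Silicon and other arm64 hosts; on amd64 they would need emulation. A sketch of forcing the platform explicitly (Compose honors DOCKER_DEFAULT_PLATFORM):

# run the arm64 variants under emulation on an amd64 host
DOCKER_DEFAULT_PLATFORM=linux/arm64 docker compose up -d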

@@ -8,40 +8,52 @@ services:
ports:
- 8080:8080
depends_on:
- kafka
- zookeeper0
- kafka0
environment:
KAFKA_CLUSTERS_0_NAME: local
KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:29092
KAFKA_CLUSTERS_0_METRICS_PORT: 9997
KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
KAFKA_CLUSTERS_0_ZOOKEEPER: zookeeper0:2181
KAFKA_CLUSTERS_0_JMXPORT: 9997
SERVER_SERVLET_CONTEXT_PATH: /kafkaui
AUTH_TYPE: "LOGIN_FORM"
SPRING_SECURITY_USER_NAME: admin
SPRING_SECURITY_USER_PASSWORD: pass
kafka:
image: confluentinc/cp-kafka:7.2.1
hostname: kafka
container_name: kafka
zookeeper0:
image: confluentinc/cp-zookeeper:5.2.4
environment:
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_TICK_TIME: 2000
ports:
- "9092:9092"
- "9997:9997"
- 2181:2181
kafka0:
image: confluentinc/cp-kafka:5.3.1
depends_on:
- zookeeper0
ports:
- 9092:9092
- 9997:9997
environment:
KAFKA_BROKER_ID: 1
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT'
KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka:29092,PLAINTEXT_HOST://localhost:9092'
KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
KAFKA_JMX_PORT: 9997
KAFKA_JMX_HOSTNAME: localhost
KAFKA_PROCESS_ROLES: 'broker,controller'
KAFKA_NODE_ID: 1
KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka:29093'
KAFKA_LISTENERS: 'PLAINTEXT://kafka:29092,CONTROLLER://kafka:29093,PLAINTEXT_HOST://0.0.0.0:9092'
KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT'
KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
JMX_PORT: 9997
KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9997
kafka-init-topics:
image: confluentinc/cp-kafka:5.3.1
volumes:
- ./scripts/update_run.sh:/tmp/update_run.sh
command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"
- ./message.json:/data/message.json
depends_on:
- kafka0
command: "bash -c 'echo Waiting for Kafka to be ready... && \
cub kafka-ready -b kafka0:29092 1 30 && \
kafka-topics --create --topic second.users --partitions 3 --replication-factor 1 --if-not-exists --zookeeper zookeeper0:2181 && \
kafka-topics --create --topic second.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper0:2181 && \
kafka-topics --create --topic first.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper0:2181 && \
kafka-console-producer --broker-list kafka0:29092 --topic second.users < /data/message.json'"

@@ -1,62 +1,68 @@
---
version: "2"
version: '2'
services:
kafka-ui:
container_name: kafka-ui
image: provectuslabs/kafka-ui:latest
ports:
- 8080:8080
depends_on:
- zookeeper0
- zookeeper1
- kafka0
- kafka1
- schemaregistry0
- kafka-connect0
environment:
KAFKA_CLUSTERS_0_NAME: local
KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
KAFKA_CLUSTERS_0_METRICS_PORT: 9997
KAFKA_CLUSTERS_0_ZOOKEEPER: zookeeper0:2181
KAFKA_CLUSTERS_0_JMXPORT: 9997
KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry0:8085
KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first
KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
KAFKA_CLUSTERS_0_KAFKACONNECT_0_USERNAME: admin
KAFKA_CLUSTERS_0_KAFKACONNECT_0_PASSWORD: admin-secret
KAFKA_CLUSTERS_0_KSQLDBSERVER: http://ksqldb:8088
zookeeper0:
image: confluentinc/cp-zookeeper:5.2.4
environment:
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_TICK_TIME: 2000
ports:
- 2181:2181
kafka0:
image: confluentinc/cp-kafka:7.2.1
hostname: kafka0
container_name: kafka0
image: confluentinc/cp-kafka:5.3.1
depends_on:
- zookeeper0
ports:
- "9092:9092"
- "9997:9997"
- 9092:9092
- 9997:9997
environment:
KAFKA_BROKER_ID: 1
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: "CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT"
KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092"
KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
KAFKA_JMX_PORT: 9997
KAFKA_JMX_HOSTNAME: localhost
JMX_PORT: 9997
KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9997
KAFKA_PROCESS_ROLES: "broker,controller"
KAFKA_NODE_ID: 1
KAFKA_CONTROLLER_QUORUM_VOTERS: "1@kafka0:29093"
KAFKA_LISTENERS: "PLAINTEXT://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092"
KAFKA_INTER_BROKER_LISTENER_NAME: "PLAINTEXT"
KAFKA_CONTROLLER_LISTENER_NAMES: "CONTROLLER"
KAFKA_LOG_DIRS: "/tmp/kraft-combined-logs"
volumes:
- ./scripts/update_run.sh:/tmp/update_run.sh
command: 'bash -c ''if [ ! -f /tmp/update_run.sh ]; then echo "ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'''
schemaregistry0:
image: confluentinc/cp-schema-registry:7.2.1
image: confluentinc/cp-schema-registry:5.5.0
ports:
- 8085:8085
depends_on:
- zookeeper0
- kafka0
environment:
SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper0:2181
SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
SCHEMA_REGISTRY_HOST_NAME: schemaregistry0
SCHEMA_REGISTRY_LISTENERS: http://schemaregistry0:8085
@@ -65,11 +71,12 @@ services:
SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: INFO
SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
kafka-connect0:
build:
context: ./kafka-connect
args:
image: confluentinc/cp-kafka-connect:7.2.1
image: confluentinc/cp-kafka-connect:6.0.1
ports:
- 8083:8083
depends_on:
@@ -93,22 +100,51 @@ services:
CONNECT_INTERNAL_KEY_CONVERTER: org.apache.kafka.connect.json.JsonConverter
CONNECT_INTERNAL_VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter
CONNECT_REST_ADVERTISED_HOST_NAME: kafka-connect0
CONNECT_REST_PORT: 8083
CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components"
CONNECT_REST_EXTENSION_CLASSES: "org.apache.kafka.connect.rest.basic.auth.extension.BasicAuthSecurityRestExtension"
KAFKA_OPTS: "-Djava.security.auth.login.config=/conf/kafka_connect.jaas"
# AWS_ACCESS_KEY_ID: ""
# AWS_SECRET_ACCESS_KEY: ""
# AWS_ACCESS_KEY_ID: ""
# AWS_SECRET_ACCESS_KEY: ""
kafka-init-topics:
image: confluentinc/cp-kafka:7.2.1
image: confluentinc/cp-kafka:5.3.1
volumes:
- ./data/message.json:/data/message.json
- ./message.json:/data/message.json
depends_on:
- kafka1
command: "bash -c 'echo Waiting for Kafka to be ready... && \
cub kafka-ready -b kafka1:29092 1 30 && \
kafka-topics --create --topic second.users --partitions 3 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
kafka-topics --create --topic second.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
kafka-topics --create --topic first.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper0:2181 && \
kafka-console-producer --broker-list kafka1:29092 --topic second.users < /data/message.json'"
create-connectors:
image: ellerbrock/alpine-bash-curl-ssl
depends_on:
- postgres-db
- kafka-connect0
volumes:
- ./connectors:/connectors
command: bash -c '/connectors/start.sh'
ksqldb:
image: confluentinc/ksqldb-server:0.18.0
depends_on:
- kafka0
command: "bash -c 'echo Waiting for Kafka to be ready... && \
cub kafka-ready -b kafka0:29092 1 30 && \
kafka-topics --create --topic users --partitions 3 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
kafka-topics --create --topic messages --partitions 2 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
kafka-console-producer --bootstrap-server kafka0:29092 --topic users < /data/message.json'"
- kafka-connect0
- schemaregistry0
ports:
- 8088:8088
environment:
KSQL_CUB_KAFKA_TIMEOUT: 120
KSQL_LISTENERS: http://0.0.0.0:8088
KSQL_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
KSQL_KSQL_LOGGING_PROCESSING_STREAM_AUTO_CREATE: "true"
KSQL_KSQL_LOGGING_PROCESSING_TOPIC_AUTO_CREATE: "true"
KSQL_KSQL_CONNECT_URL: http://kafka-connect0:8083
KSQL_KSQL_SCHEMA_REGISTRY_URL: http://schemaregistry0:8085
KSQL_KSQL_SERVICE_ID: my_ksql_1
KSQL_KSQL_HIDDEN_TOPICS: '^_.*'
KSQL_CACHE_MAX_BYTES_BUFFERING: 0

@@ -0,0 +1,201 @@
---
version: '2'
services:
kafka-ui:
container_name: kafka-ui
image: provectuslabs/kafka-ui:latest
ports:
- 8080:8080
depends_on:
- zookeeper0
- zookeeper1
- kafka0
- kafka1
- schemaregistry0
- kafka-connect0
environment:
KAFKA_CLUSTERS_0_NAME: local
KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
KAFKA_CLUSTERS_0_ZOOKEEPER: zookeeper0:2181
KAFKA_CLUSTERS_0_JMXPORT: 9997
KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry0:8085
KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first
KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
KAFKA_CLUSTERS_0_KSQLDBSERVER: http://ksqldb:8088
KAFKA_CLUSTERS_1_NAME: secondLocal
KAFKA_CLUSTERS_1_BOOTSTRAPSERVERS: kafka1:29092
KAFKA_CLUSTERS_1_ZOOKEEPER: zookeeper1:2181
KAFKA_CLUSTERS_1_JMXPORT: 9998
KAFKA_CLUSTERS_1_SCHEMAREGISTRY: http://schemaregistry1:8085
zookeeper0:
image: confluentinc/cp-zookeeper:5.2.4
environment:
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_TICK_TIME: 2000
ports:
- 2181:2181
kafka0:
image: confluentinc/cp-kafka:5.3.1
depends_on:
- zookeeper0
ports:
- 9092:9092
- 9997:9997
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
JMX_PORT: 9997
KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9997
zookeeper1:
image: confluentinc/cp-zookeeper:5.2.4
environment:
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_TICK_TIME: 2000
kafka1:
image: confluentinc/cp-kafka:5.3.1
depends_on:
- zookeeper1
ports:
- 9093:9093
- 9998:9998
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: zookeeper1:2181
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka1:29092,PLAINTEXT_HOST://localhost:9093
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
JMX_PORT: 9998
KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka1 -Dcom.sun.management.jmxremote.rmi.port=9998
schemaregistry0:
image: confluentinc/cp-schema-registry:5.5.0
ports:
- 8085:8085
depends_on:
- zookeeper0
- kafka0
environment:
SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper0:2181
SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
SCHEMA_REGISTRY_HOST_NAME: schemaregistry0
SCHEMA_REGISTRY_LISTENERS: http://schemaregistry0:8085
SCHEMA_REGISTRY_SCHEMA_REGISTRY_INTER_INSTANCE_PROTOCOL: "http"
SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: INFO
SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
schemaregistry1:
image: confluentinc/cp-schema-registry:5.5.0
ports:
- 18085:8085
depends_on:
- zookeeper1
- kafka1
environment:
SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka1:29092
SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper1:2181
SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
SCHEMA_REGISTRY_HOST_NAME: schemaregistry1
SCHEMA_REGISTRY_LISTENERS: http://schemaregistry1:8085
SCHEMA_REGISTRY_SCHEMA_REGISTRY_INTER_INSTANCE_PROTOCOL: "http"
SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: INFO
SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
kafka-connect0:
build:
context: ./kafka-connect
args:
image: confluentinc/cp-kafka-connect:6.0.1
ports:
- 8083:8083
depends_on:
- kafka0
- schemaregistry0
environment:
CONNECT_BOOTSTRAP_SERVERS: kafka0:29092
CONNECT_GROUP_ID: compose-connect-group
CONNECT_CONFIG_STORAGE_TOPIC: _connect_configs
CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: 1
CONNECT_OFFSET_STORAGE_TOPIC: _connect_offset
CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: 1
CONNECT_STATUS_STORAGE_TOPIC: _connect_status
CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: 1
CONNECT_KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter
CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: http://schemaregistry0:8085
CONNECT_VALUE_CONVERTER: org.apache.kafka.connect.storage.StringConverter
CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: http://schemaregistry0:8085
CONNECT_INTERNAL_KEY_CONVERTER: org.apache.kafka.connect.json.JsonConverter
CONNECT_INTERNAL_VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter
CONNECT_REST_ADVERTISED_HOST_NAME: kafka-connect0
CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components"
# AWS_ACCESS_KEY_ID: ""
# AWS_SECRET_ACCESS_KEY: ""
kafka-init-topics:
image: confluentinc/cp-kafka:5.3.1
volumes:
- ./message.json:/data/message.json
depends_on:
- kafka1
command: "bash -c 'echo Waiting for Kafka to be ready... && \
cub kafka-ready -b kafka1:29092 1 30 && \
kafka-topics --create --topic second.users --partitions 3 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
kafka-topics --create --topic second.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
kafka-topics --create --topic first.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper0:2181 && \
kafka-console-producer --broker-list kafka1:29092 --topic second.users < /data/message.json'"
postgres-db:
build:
context: ./postgres
args:
image: postgres:9.6.22
ports:
- 5432:5432
environment:
POSTGRES_USER: 'dev_user'
POSTGRES_PASSWORD: '12345'
create-connectors:
image: ellerbrock/alpine-bash-curl-ssl
depends_on:
- postgres-db
- kafka-connect0
volumes:
- ./connectors:/connectors
command: bash -c '/connectors/start.sh'
ksqldb:
image: confluentinc/ksqldb-server:0.18.0
depends_on:
- kafka0
- kafka-connect0
- schemaregistry0
ports:
- 8088:8088
environment:
KSQL_CUB_KAFKA_TIMEOUT: 120
KSQL_LISTENERS: http://0.0.0.0:8088
KSQL_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
KSQL_KSQL_LOGGING_PROCESSING_STREAM_AUTO_CREATE: "true"
KSQL_KSQL_LOGGING_PROCESSING_TOPIC_AUTO_CREATE: "true"
KSQL_KSQL_CONNECT_URL: http://kafka-connect0:8083
KSQL_KSQL_SCHEMA_REGISTRY_URL: http://schemaregistry0:8085
KSQL_KSQL_SERVICE_ID: my_ksql_1
KSQL_KSQL_HIDDEN_TOPICS: '^_.*'
KSQL_CACHE_MAX_BYTES_BUFFERING: 0

@@ -7,48 +7,56 @@ services:
image: provectuslabs/kafka-ui:latest
ports:
- 8080:8080
- 5005:5005
depends_on:
- zookeeper0
- kafka0
- schemaregistry0
- kafka-connect0
environment:
KAFKA_CLUSTERS_0_NAME: local
KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
KAFKA_CLUSTERS_0_ZOOKEEPER: zookeeper0:2181
KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry0:8085
KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first
KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
KAFKA_CLUSTERS_0_METRICS_PORT: 9997
KAFKA_CLUSTERS_0_METRICS_USERNAME: root
KAFKA_CLUSTERS_0_METRICS_PASSWORD: password
KAFKA_CLUSTERS_0_METRICS_KEYSTORE_LOCATION: /jmx/clientkeystore
KAFKA_CLUSTERS_0_METRICS_KEYSTORE_PASSWORD: '12345678'
KAFKA_CLUSTERS_0_SSL_TRUSTSTORE_LOCATION: /jmx/clienttruststore
KAFKA_CLUSTERS_0_SSL_TRUSTSTORE_PASSWORD: '12345678'
KAFKA_CLUSTERS_0_JMXPORT: 9997
KAFKA_CLUSTERS_0_JMXSSL: 'true'
KAFKA_CLUSTERS_0_JMXUSERNAME: root
KAFKA_CLUSTERS_0_JMXPASSWORD: password
JAVA_OPTS: >-
-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5005
-Djavax.net.ssl.trustStore=/jmx/clienttruststore
-Djavax.net.ssl.trustStorePassword=12345678
-Djavax.net.ssl.keyStore=/jmx/clientkeystore
-Djavax.net.ssl.keyStorePassword=12345678
volumes:
- ./jmx/clienttruststore:/jmx/clienttruststore
- ./jmx/clientkeystore:/jmx/clientkeystore
zookeeper0:
image: confluentinc/cp-zookeeper:5.2.4
environment:
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_TICK_TIME: 2000
ports:
- 2181:2181
kafka0:
image: confluentinc/cp-kafka:7.2.1
hostname: kafka0
container_name: kafka0
image: confluentinc/cp-kafka:5.3.1
depends_on:
- zookeeper0
ports:
- 9092:9092
- 9997:9997
environment:
KAFKA_BROKER_ID: 1
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT'
KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092'
KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
KAFKA_JMX_PORT: 9997
KAFKA_PROCESS_ROLES: 'broker,controller'
KAFKA_NODE_ID: 1
KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka0:29093'
KAFKA_LISTENERS: 'PLAINTEXT://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092'
KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT'
KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
JMX_PORT: 9997
# CHMOD 700 FOR JMXREMOTE.* FILES
KAFKA_JMX_OPTS: >-
-Dcom.sun.management.jmxremote
@@ -64,10 +72,65 @@ services:
-Dcom.sun.management.jmxremote.access.file=/jmx/jmxremote.access
-Dcom.sun.management.jmxremote.rmi.port=9997
-Djava.rmi.server.hostname=kafka0
-Djava.rmi.server.logCalls=true
# -Djavax.net.debug=ssl:handshake
volumes:
- ./jmx/serverkeystore:/jmx/serverkeystore
- ./jmx/servertruststore:/jmx/servertruststore
- ./jmx/jmxremote.password:/jmx/jmxremote.password
- ./jmx/jmxremote.access:/jmx/jmxremote.access
- ./scripts/update_run.sh:/tmp/update_run.sh
command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"
- ./jmx/serverkeystore:/jmx/serverkeystore
- ./jmx/servertruststore:/jmx/servertruststore
- ./jmx/jmxremote.password:/jmx/jmxremote.password
- ./jmx/jmxremote.access:/jmx/jmxremote.access
schemaregistry0:
image: confluentinc/cp-schema-registry:5.5.0
ports:
- 8085:8085
depends_on:
- zookeeper0
- kafka0
environment:
SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper0:2181
SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
SCHEMA_REGISTRY_HOST_NAME: schemaregistry0
SCHEMA_REGISTRY_LISTENERS: http://schemaregistry0:8085
SCHEMA_REGISTRY_SCHEMA_REGISTRY_INTER_INSTANCE_PROTOCOL: "http"
SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: INFO
SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
kafka-connect0:
image: confluentinc/cp-kafka-connect:6.0.1
ports:
- 8083:8083
depends_on:
- kafka0
- schemaregistry0
environment:
CONNECT_BOOTSTRAP_SERVERS: kafka0:29092
CONNECT_GROUP_ID: compose-connect-group
CONNECT_CONFIG_STORAGE_TOPIC: _connect_configs
CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: 1
CONNECT_OFFSET_STORAGE_TOPIC: _connect_offset
CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: 1
CONNECT_STATUS_STORAGE_TOPIC: _connect_status
CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: 1
CONNECT_KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter
CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: http://schemaregistry0:8085
CONNECT_VALUE_CONVERTER: org.apache.kafka.connect.storage.StringConverter
CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: http://schemaregistry0:8085
CONNECT_INTERNAL_KEY_CONVERTER: org.apache.kafka.connect.json.JsonConverter
CONNECT_INTERNAL_VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter
CONNECT_REST_ADVERTISED_HOST_NAME: kafka-connect0
CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components"
kafka-init-topics:
image: confluentinc/cp-kafka:5.3.1
volumes:
- ./message.json:/data/message.json
depends_on:
- kafka0
command: "bash -c 'echo Waiting for Kafka to be ready... && \
cub kafka-ready -b kafka0:29092 1 30 && \
kafka-topics --create --topic second.users --partitions 3 --replication-factor 1 --if-not-exists --zookeeper zookeeper0:2181 && \
kafka-topics --create --topic first.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper0:2181 && \
kafka-console-producer --broker-list kafka0:29092 --topic second.users < /data/message.json'"
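The jmxremote.password / jmxremote.access pair mounted above uses the stock JDK format; a sketch matching the root/password credentials given to kafka-ui (file contents assumed; keep permissions tight, as the CHMOD comment in the compose file warns):

cat > jmx/jmxremote.access <<'EOF'
root readwrite
EOF
cat > jmx/jmxremote.password <<'EOF'
root password
EOF
chmod 600 jmx/jmxremote.password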

@@ -0,0 +1,19 @@
---
version: '2'
services:
nginx:
image: nginx:latest
volumes:
- ./proxy.conf:/etc/nginx/conf.d/default.conf
ports:
- 8080:80
kafka-ui:
container_name: kafka-ui
image: provectuslabs/kafka-ui:latest
ports:
- 8082:8080
environment:
KAFKA_CLUSTERS_0_NAME: local
KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:9092
SERVER_SERVLET_CONTEXT_PATH: /kafka-ui
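The mounted proxy.conf is what maps the /kafka-ui context path onto the container; a minimal sketch (contents assumed; the upstream name comes from the compose service):

cat > proxy.conf <<'EOF'
server {
  listen 80;
  location /kafka-ui {
    proxy_pass http://kafka-ui:8080;
  }
}
EOF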

@@ -8,45 +8,45 @@ services:
ports:
- 8080:8080
depends_on:
- zookeeper
- kafka
environment:
KAFKA_CLUSTERS_0_NAME: local
KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:29092
# SERVER_SERVLET_CONTEXT_PATH: "/kafkaui"
KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:9092
KAFKA_CLUSTERS_0_ZOOKEEPER: zookeeper:2181
KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL: SASL_PLAINTEXT
KAFKA_CLUSTERS_0_PROPERTIES_SASL_MECHANISM: PLAIN
KAFKA_CLUSTERS_0_PROPERTIES_SASL_JAAS_CONFIG: 'org.apache.kafka.common.security.plain.PlainLoginModule required username="admin" password="admin-secret";'
DYNAMIC_CONFIG_ENABLED: 'true' # not necessary for sasl auth, added for tests
zookeeper:
image: confluentinc/cp-zookeeper:5.2.4
environment:
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_TICK_TIME: 2000
ports:
- 2181:2181
kafka:
image: confluentinc/cp-kafka:7.2.1
image: wurstmeister/kafka:latest
hostname: kafka
container_name: kafka
depends_on:
- zookeeper
ports:
- "9092:9092"
- "9997:9997"
- '9092:9092'
environment:
KAFKA_BROKER_ID: 1
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,SASL_PLAINTEXT:SASL_PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT'
KAFKA_ADVERTISED_LISTENERS: 'SASL_PLAINTEXT://kafka:29092,PLAINTEXT_HOST://localhost:9092'
KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
KAFKA_LISTENERS: SASL_PLAINTEXT://kafka:9092
KAFKA_ADVERTISED_LISTENERS: SASL_PLAINTEXT://kafka:9092
KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
ALLOW_PLAINTEXT_LISTENER: 'yes'
KAFKA_OPTS: "-Djava.security.auth.login.config=/etc/kafka/jaas/kafka_server.conf"
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
KAFKA_JMX_PORT: 9997
KAFKA_JMX_HOSTNAME: localhost
KAFKA_PROCESS_ROLES: 'broker,controller'
KAFKA_NODE_ID: 1
KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka:29093'
KAFKA_LISTENERS: 'SASL_PLAINTEXT://kafka:29092,CONTROLLER://kafka:29093,PLAINTEXT_HOST://0.0.0.0:9092'
KAFKA_INTER_BROKER_LISTENER_NAME: 'SASL_PLAINTEXT'
KAFKA_SASL_ENABLED_MECHANISMS: 'PLAIN'
KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: 'PLAIN'
KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
KAFKA_SECURITY_PROTOCOL: 'SASL_PLAINTEXT'
KAFKA_SUPER_USERS: 'User:admin,User:enzo'
KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.auth.SimpleAclAuthorizer
KAFKA_INTER_BROKER_LISTENER_NAME: SASL_PLAINTEXT
KAFKA_SASL_ENABLED_MECHANISMS: PLAIN
KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: PLAIN
KAFKA_SECURITY_PROTOCOL: SASL_PLAINTEXT
KAFKA_SUPER_USERS: User:admin,User:enzo
KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: 'true'
volumes:
- ./scripts/update_run.sh:/tmp/update_run.sh
- ./jaas:/etc/kafka/jaas
command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"
- ./jaas:/etc/kafka/jaas
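Client applications reach this broker with the same three SASL properties the UI container sets above. A minimal Java producer sketch: the admin/admin-secret credentials and the in-network SASL_PLAINTEXT listener kafka:29092 come from the compose file; the topic name is hypothetical:

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;
import java.util.Properties;

public class SaslPlainProducer {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.put("bootstrap.servers", "kafka:29092"); // SASL_PLAINTEXT listener from the compose file
    props.put("security.protocol", "SASL_PLAINTEXT");
    props.put("sasl.mechanism", "PLAIN");
    props.put("sasl.jaas.config",
        "org.apache.kafka.common.security.plain.PlainLoginModule required "
            + "username=\"admin\" password=\"admin-secret\";");
    props.put("key.serializer", StringSerializer.class.getName());
    props.put("value.serializer", StringSerializer.class.getName());
    try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
      producer.send(new ProducerRecord<>("test-topic", "key", "value")); // topic name is illustrative
    }
  }
}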

View file

@ -1,113 +0,0 @@
---
version: '2'
services:
kafka-ui:
container_name: kafka-ui
image: provectuslabs/kafka-ui:latest
ports:
- 8080:8080
depends_on:
- kafka0
- schemaregistry0
environment:
kafka.clusters.0.name: SerdeExampleCluster
kafka.clusters.0.bootstrapServers: kafka0:29092
kafka.clusters.0.schemaRegistry: http://schemaregistry0:8085
# optional SSL settings for cluster (will be used by SchemaRegistry serde, if set)
#kafka.clusters.0.ssl.keystoreLocation: /kafka.keystore.jks
#kafka.clusters.0.ssl.keystorePassword: "secret"
#kafka.clusters.0.ssl.truststoreLocation: /kafka.truststore.jks
#kafka.clusters.0.ssl.truststorePassword: "secret"
# optional auth properties for SR
#kafka.clusters.0.schemaRegistryAuth.username: "user"
#kafka.clusters.0.schemaRegistryAuth.password: "pswrd"
kafka.clusters.0.defaultKeySerde: Int32 #optional
kafka.clusters.0.defaultValueSerde: String #optional
kafka.clusters.0.serde.0.name: ProtobufFile
kafka.clusters.0.serde.0.topicKeysPattern: "topic1"
kafka.clusters.0.serde.0.topicValuesPattern: "topic1"
kafka.clusters.0.serde.0.properties.protobufFilesDir: /protofiles/
kafka.clusters.0.serde.0.properties.protobufMessageNameForKey: test.MyKey # default type for keys
kafka.clusters.0.serde.0.properties.protobufMessageName: test.MyValue # default type for values
kafka.clusters.0.serde.0.properties.protobufMessageNameForKeyByTopic.topic1: test.MySpecificTopicKey # keys type for topic "topic1"
kafka.clusters.0.serde.0.properties.protobufMessageNameByTopic.topic1: test.MySpecificTopicValue # values type for topic "topic1"
kafka.clusters.0.serde.1.name: String
#kafka.clusters.0.serde.1.properties.encoding: "UTF-16" #optional, default is UTF-8
kafka.clusters.0.serde.1.topicValuesPattern: "json-events|text-events"
kafka.clusters.0.serde.2.name: AsciiString
kafka.clusters.0.serde.2.className: com.provectus.kafka.ui.serdes.builtin.StringSerde
kafka.clusters.0.serde.2.properties.encoding: "ASCII"
kafka.clusters.0.serde.3.name: SchemaRegistry # will be configured automatically using cluster SR
kafka.clusters.0.serde.3.topicValuesPattern: "sr-topic.*"
kafka.clusters.0.serde.4.name: AnotherSchemaRegistry
kafka.clusters.0.serde.4.className: com.provectus.kafka.ui.serdes.builtin.sr.SchemaRegistrySerde
kafka.clusters.0.serde.4.properties.url: http://schemaregistry0:8085
kafka.clusters.0.serde.4.properties.keySchemaNameTemplate: "%s-key"
kafka.clusters.0.serde.4.properties.schemaNameTemplate: "%s-value"
#kafka.clusters.0.serde.4.topicValuesPattern: "sr2-topic.*"
# optional auth and ssl properties for SR (overrides cluster-level):
#kafka.clusters.0.serde.4.properties.username: "user"
#kafka.clusters.0.serde.4.properties.password: "passw"
#kafka.clusters.0.serde.4.properties.keystoreLocation: /kafka.keystore.jks
#kafka.clusters.0.serde.4.properties.keystorePassword: "secret"
#kafka.clusters.0.serde.4.properties.truststoreLocation: /kafka.truststore.jks
#kafka.clusters.0.serde.4.properties.truststorePassword: "secret"
kafka.clusters.0.serde.5.name: UInt64
kafka.clusters.0.serde.5.topicKeysPattern: "topic-with-uint64keys"
volumes:
- ./proto:/protofiles
kafka0:
image: confluentinc/cp-kafka:7.2.1
hostname: kafka0
container_name: kafka0
ports:
- "9092:9092"
- "9997:9997"
environment:
KAFKA_BROKER_ID: 1
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT'
KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092'
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
KAFKA_JMX_PORT: 9997
KAFKA_JMX_HOSTNAME: localhost
KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9997
KAFKA_PROCESS_ROLES: 'broker,controller'
KAFKA_NODE_ID: 1
KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka0:29093'
KAFKA_LISTENERS: 'PLAINTEXT://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092'
KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT'
KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
volumes:
- ./scripts/update_run.sh:/tmp/update_run.sh
command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"
schemaregistry0:
image: confluentinc/cp-schema-registry:7.2.1
ports:
- 8085:8085
depends_on:
- kafka0
environment:
SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
SCHEMA_REGISTRY_HOST_NAME: schemaregistry0
SCHEMA_REGISTRY_LISTENERS: http://schemaregistry0:8085
SCHEMA_REGISTRY_SCHEMA_REGISTRY_INTER_INSTANCE_PROTOCOL: "http"
SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: INFO
SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
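The defaultKeySerde: Int32 and the UInt64 serde configured above read fixed-width big-endian integers from the raw record bytes. A small Java sketch of that byte layout — a plain illustration of the encoding these built-in serdes are assumed to use, not the UI's own serde code:

import java.nio.ByteBuffer;

public class IntKeyEncoding {
  public static void main(String[] args) {
    // Int32 key: 4 bytes, big-endian (ByteBuffer's default order).
    byte[] int32Key = ByteBuffer.allocate(4).putInt(42).array();
    System.out.println(ByteBuffer.wrap(int32Key).getInt()); // 42

    // UInt64 key: 8 bytes; Java has no unsigned long, so format via Long.toUnsignedString.
    byte[] uint64Key = ByteBuffer.allocate(8).putLong(-1L).array(); // 0xFF.. = 2^64 - 1
    long raw = ByteBuffer.wrap(uint64Key).getLong();
    System.out.println(Long.toUnsignedString(raw)); // 18446744073709551615
  }
}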

View file

@ -1,44 +0,0 @@
---
version: '2'
services:
kafka0:
image: confluentinc/cp-kafka:7.2.1
hostname: kafka0
container_name: kafka0
ports:
- "9092:9092"
- "11001:11001"
environment:
KAFKA_BROKER_ID: 1
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT'
KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092'
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
KAFKA_PROCESS_ROLES: 'broker,controller'
KAFKA_NODE_ID: 1
KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka0:29093'
KAFKA_LISTENERS: 'PLAINTEXT://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092'
KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT'
KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
KAFKA_OPTS: -javaagent:/usr/share/jmx_exporter/jmx_prometheus_javaagent.jar=11001:/usr/share/jmx_exporter/kafka-broker.yml
volumes:
- ./jmx-exporter:/usr/share/jmx_exporter/
- ./scripts/update_run.sh:/tmp/update_run.sh
command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /usr/share/jmx_exporter/kafka-prepare-and-run ; fi'"
kafka-ui:
container_name: kafka-ui
image: provectuslabs/kafka-ui:latest
ports:
- 8080:8080
depends_on:
- kafka0
environment:
KAFKA_CLUSTERS_0_NAME: local
KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
KAFKA_CLUSTERS_0_METRICS_PORT: 11001
KAFKA_CLUSTERS_0_METRICS_TYPE: PROMETHEUS
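With the jmx_exporter javaagent on port 11001 and METRICS_TYPE: PROMETHEUS, broker metrics are plain text over HTTP. A minimal Java check that the exporter answers; /metrics is the jmx_exporter default path, and the kafka_server_ prefix in the filter assumes the usual kafka-broker.yml rewrite rules:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class ScrapeBrokerMetrics {
  public static void main(String[] args) throws Exception {
    HttpRequest request = HttpRequest.newBuilder()
        .uri(URI.create("http://localhost:11001/metrics")) // agent port from the compose file
        .build();
    HttpResponse<String> response = HttpClient.newHttpClient()
        .send(request, HttpResponse.BodyHandlers.ofString());
    response.body().lines()
        .filter(line -> line.startsWith("kafka_server_")) // assumed metric prefix
        .limit(5)
        .forEach(System.out::println);
  }
}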

View file

@ -8,88 +8,86 @@ services:
ports:
- 8080:8080
depends_on:
- zookeeper0
- zookeeper1
- kafka0
- kafka1
- schemaregistry0
- schemaregistry1
- kafka-connect0
environment:
KAFKA_CLUSTERS_0_NAME: local
KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
KAFKA_CLUSTERS_0_METRICS_PORT: 9997
KAFKA_CLUSTERS_0_ZOOKEEPER: zookeeper0:2181
KAFKA_CLUSTERS_0_JMXPORT: 9997
KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry0:8085
KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first
KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
KAFKA_CLUSTERS_1_NAME: secondLocal
KAFKA_CLUSTERS_1_BOOTSTRAPSERVERS: kafka1:29092
KAFKA_CLUSTERS_1_METRICS_PORT: 9998
KAFKA_CLUSTERS_1_ZOOKEEPER: zookeeper1:2181
KAFKA_CLUSTERS_1_JMXPORT: 9998
KAFKA_CLUSTERS_1_SCHEMAREGISTRY: http://schemaregistry1:8085
DYNAMIC_CONFIG_ENABLED: 'true'
KAFKA_CLUSTERS_1_KAFKACONNECT_0_NAME: first
KAFKA_CLUSTERS_1_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
zookeeper0:
image: confluentinc/cp-zookeeper:5.2.4
environment:
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_TICK_TIME: 2000
ports:
- 2181:2181
kafka0:
image: confluentinc/cp-kafka:7.2.1
hostname: kafka0
container_name: kafka0
image: confluentinc/cp-kafka:5.3.1
depends_on:
- zookeeper0
ports:
- "9092:9092"
- "9997:9997"
- 9092:9092
- 9997:9997
environment:
KAFKA_BROKER_ID: 1
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT'
KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092'
KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
KAFKA_JMX_PORT: 9997
JMX_PORT: 9997
KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9997
KAFKA_PROCESS_ROLES: 'broker,controller'
KAFKA_NODE_ID: 1
KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka0:29093'
KAFKA_LISTENERS: 'PLAINTEXT://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092'
KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT'
KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
volumes:
- ./scripts/update_run.sh:/tmp/update_run.sh
command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"
zookeeper1:
image: confluentinc/cp-zookeeper:5.2.4
environment:
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_TICK_TIME: 2000
kafka1:
image: confluentinc/cp-kafka:7.2.1
hostname: kafka1
container_name: kafka1
image: confluentinc/cp-kafka:5.3.1
depends_on:
- zookeeper1
ports:
- "9093:9092"
- "9998:9998"
- 9093:9093
- 9998:9998
environment:
KAFKA_BROKER_ID: 1
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT'
KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka1:29092,PLAINTEXT_HOST://localhost:9093'
KAFKA_ZOOKEEPER_CONNECT: zookeeper1:2181
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka1:29092,PLAINTEXT_HOST://localhost:9093
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
KAFKA_JMX_PORT: 9998
KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka1 -Dcom.sun.management.jmxremote.rmi.port=9998
KAFKA_PROCESS_ROLES: 'broker,controller'
KAFKA_NODE_ID: 1
KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka1:29093'
KAFKA_LISTENERS: 'PLAINTEXT://kafka1:29092,CONTROLLER://kafka1:29093,PLAINTEXT_HOST://0.0.0.0:9092'
KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT'
KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
volumes:
- ./scripts/update_run.sh:/tmp/update_run.sh
command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"
JMX_PORT: 9998
KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka1 -Dcom.sun.management.jmxremote.rmi.port=9998
schemaregistry0:
image: confluentinc/cp-schema-registry:7.2.1
image: confluentinc/cp-schema-registry:5.5.0
ports:
- 8085:8085
depends_on:
- zookeeper0
- kafka0
environment:
SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper0:2181
SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
SCHEMA_REGISTRY_HOST_NAME: schemaregistry0
SCHEMA_REGISTRY_LISTENERS: http://schemaregistry0:8085
@ -99,13 +97,15 @@ services:
SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
schemaregistry1:
image: confluentinc/cp-schema-registry:7.2.1
image: confluentinc/cp-schema-registry:5.5.0
ports:
- 18085:8085
depends_on:
- zookeeper1
- kafka1
environment:
SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka1:29092
SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper1:2181
SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
SCHEMA_REGISTRY_HOST_NAME: schemaregistry1
SCHEMA_REGISTRY_LISTENERS: http://schemaregistry1:8085
@ -115,7 +115,7 @@ services:
SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
kafka-connect0:
image: confluentinc/cp-kafka-connect:7.2.1
image: confluentinc/cp-kafka-connect:6.0.1
ports:
- 8083:8083
depends_on:
@ -140,14 +140,14 @@ services:
CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components"
kafka-init-topics:
image: confluentinc/cp-kafka:7.2.1
image: confluentinc/cp-kafka:5.3.1
volumes:
- ./data/message.json:/data/message.json
- ./message.json:/data/message.json
depends_on:
- kafka1
command: "bash -c 'echo Waiting for Kafka to be ready... && \
cub kafka-ready -b kafka1:29092 1 30 && \
kafka-topics --create --topic second.users --partitions 3 --replication-factor 1 --if-not-exists --bootstrap-server kafka1:29092 && \
kafka-topics --create --topic second.messages --partitions 2 --replication-factor 1 --if-not-exists --bootstrap-server kafka1:29092 && \
kafka-topics --create --topic first.messages --partitions 2 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
kafka-console-producer --bootstrap-server kafka1:29092 --topic second.users < /data/message.json'"
kafka-topics --create --topic second.users --partitions 3 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
kafka-topics --create --topic second.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
kafka-topics --create --topic first.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper0:2181 && \
kafka-console-producer --broker-list kafka1:29092 --topic second.users < /data/message.json'"
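The console-producer line pipes message.json in, one record per line. The same in plain Java with a KafkaProducer; the bootstrap address and topic come from the command above, and the file path is the container mount:

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Properties;

public class FileProducer {
  public static void main(String[] args) throws Exception {
    Properties props = new Properties();
    props.put("bootstrap.servers", "kafka1:29092");
    props.put("key.serializer", StringSerializer.class.getName());
    props.put("value.serializer", StringSerializer.class.getName());
    try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
      for (String line : Files.readAllLines(Path.of("/data/message.json"))) {
        producer.send(new ProducerRecord<>("second.users", line)); // null key, like the console producer
      }
    }
  }
}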

View file

@ -1,48 +0,0 @@
---
version: '2'
services:
zookeeper:
image: confluentinc/cp-zookeeper:7.2.1
hostname: zookeeper
container_name: zookeeper
ports:
- "2181:2181"
environment:
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_TICK_TIME: 2000
kafka:
image: confluentinc/cp-server:7.2.1
hostname: kafka
container_name: kafka
depends_on:
- zookeeper
ports:
- "9092:9092"
- "9997:9997"
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:29092,PLAINTEXT_HOST://localhost:9092
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1
KAFKA_CONFLUENT_BALANCER_TOPIC_REPLICATION_FACTOR: 1
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
KAFKA_JMX_PORT: 9997
KAFKA_JMX_HOSTNAME: kafka
kafka-init-topics:
image: confluentinc/cp-kafka:7.2.1
volumes:
- ./data/message.json:/data/message.json
depends_on:
- kafka
command: "bash -c 'echo Waiting for Kafka to be ready... && \
cub kafka-ready -b kafka:29092 1 30 && \
kafka-topics --create --topic users --partitions 3 --replication-factor 1 --if-not-exists --bootstrap-server kafka:29092 && \
kafka-topics --create --topic messages --partitions 2 --replication-factor 1 --if-not-exists --bootstrap-server kafka:29092 && \
kafka-console-producer --bootstrap-server kafka:29092 --topic users < /data/message.json'"
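A matching Java consumer for the users topic seeded above, connecting through the PLAINTEXT_HOST listener from this compose file; the group id is arbitrary:

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;
import java.time.Duration;
import java.util.List;
import java.util.Properties;

public class UsersConsumer {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.put("bootstrap.servers", "localhost:9092"); // PLAINTEXT_HOST listener from the compose file
    props.put("group.id", "users-reader"); // arbitrary group id
    props.put("auto.offset.reset", "earliest");
    props.put("key.deserializer", StringDeserializer.class.getName());
    props.put("value.deserializer", StringDeserializer.class.getName());
    try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
      consumer.subscribe(List.of("users"));
      ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(5));
      for (ConsumerRecord<String, String> record : records) {
        System.out.printf("offset=%d value=%s%n", record.offset(), record.value());
      }
    }
  }
}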

View file

@ -1,79 +0,0 @@
---
version: '2'
services:
kafka-ui:
container_name: kafka-ui
image: provectuslabs/kafka-ui:latest
ports:
- 8080:8080
depends_on:
- kafka0
- schemaregistry0
environment:
KAFKA_CLUSTERS_0_NAME: local
KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
KAFKA_CLUSTERS_0_METRICS_PORT: 9997
KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry0:8085
AUTH_TYPE: "LDAP"
SPRING_LDAP_URLS: "ldap://ldap:10389"
SPRING_LDAP_BASE: "cn={0},ou=people,dc=planetexpress,dc=com"
SPRING_LDAP_ADMIN_USER: "cn=admin,dc=planetexpress,dc=com"
SPRING_LDAP_ADMIN_PASSWORD: "GoodNewsEveryone"
SPRING_LDAP_USER_FILTER_SEARCH_BASE: "dc=planetexpress,dc=com"
SPRING_LDAP_USER_FILTER_SEARCH_FILTER: "(&(uid={0})(objectClass=inetOrgPerson))"
SPRING_LDAP_GROUP_FILTER_SEARCH_BASE: "ou=people,dc=planetexpress,dc=com"
# OAUTH2.LDAP.ACTIVEDIRECTORY: true
# OAUTH2.LDAP.ACTIVEDIRECTORY.DOMAIN: "memelord.lol"
ldap:
image: rroemhild/test-openldap:latest
hostname: "ldap"
ports:
- 10389:10389
kafka0:
image: confluentinc/cp-kafka:7.2.1
hostname: kafka0
container_name: kafka0
ports:
- "9092:9092"
- "9997:9997"
environment:
KAFKA_BROKER_ID: 1
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT'
KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092'
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
KAFKA_JMX_PORT: 9997
KAFKA_JMX_HOSTNAME: localhost
KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9997
KAFKA_PROCESS_ROLES: 'broker,controller'
KAFKA_NODE_ID: 1
KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka0:29093'
KAFKA_LISTENERS: 'PLAINTEXT://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092'
KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT'
KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
volumes:
- ./scripts/update_run.sh:/tmp/update_run.sh
command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"
schemaregistry0:
image: confluentinc/cp-schema-registry:7.2.1
ports:
- 8085:8085
depends_on:
- kafka0
environment:
SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
SCHEMA_REGISTRY_HOST_NAME: schemaregistry0
SCHEMA_REGISTRY_LISTENERS: http://schemaregistry0:8085
SCHEMA_REGISTRY_SCHEMA_REGISTRY_INTER_INSTANCE_PROTOCOL: "http"
SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: INFO
SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
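The rroemhild/test-openldap image ships the planetexpress dataset referenced in the DNs above. A minimal Java JNDI sketch of the same simple bind the UI performs; host port 10389 and the admin DN/password come from the compose file:

import javax.naming.Context;
import javax.naming.directory.InitialDirContext;
import java.util.Hashtable;

public class LdapBindCheck {
  public static void main(String[] args) throws Exception {
    Hashtable<String, String> env = new Hashtable<>();
    env.put(Context.INITIAL_CONTEXT_FACTORY, "com.sun.jndi.ldap.LdapCtxFactory");
    env.put(Context.PROVIDER_URL, "ldap://localhost:10389");
    env.put(Context.SECURITY_AUTHENTICATION, "simple");
    env.put(Context.SECURITY_PRINCIPAL, "cn=admin,dc=planetexpress,dc=com");
    env.put(Context.SECURITY_CREDENTIALS, "GoodNewsEveryone");
    new InitialDirContext(env).close(); // throws AuthenticationException on bad credentials
  }
}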

View file

@ -1,19 +0,0 @@
---
version: '2'
services:
nginx:
image: nginx:latest
volumes:
- ./data/proxy.conf:/etc/nginx/conf.d/default.conf
ports:
- 8080:80
kafka-ui:
container_name: kafka-ui
image: provectuslabs/kafka-ui:latest
ports:
- 8082:8080
environment:
KAFKA_CLUSTERS_0_NAME: local
KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:9092
SERVER_SERVLET_CONTEXT_PATH: /kafka-ui

View file

@ -1,15 +0,0 @@
syntax = "proto3";
package test;
import "google/protobuf/wrappers.proto";
message MyKey {
string myKeyF1 = 1;
google.protobuf.UInt64Value uint_64_wrapper = 2;
}
message MySpecificTopicKey {
string special_field1 = 1;
string special_field2 = 2;
google.protobuf.FloatValue float_wrapper = 3;
}

View file

@ -1,14 +0,0 @@
syntax = "proto3";
package test;
message MySpecificTopicValue {
string f1 = 1;
string f2 = 2;
}
message MyValue {
int32 version = 1;
string payload = 2;
map<int32, string> intToStringMap = 3;
map<string, MyValue> strToObjMap = 4;
}
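These are the files mounted at /protofiles for the ProtobufFile serde configured earlier. Given a test.MyValue class generated from the file above with protoc --java_out (assumed, not part of this compose setup), a topic1-compatible value is built and serialized like this, using the standard protobuf-java builder naming; the field values are arbitrary:

import test.MyValue; // assumed generated by protoc --java_out

public class BuildMyValue {
  public static void main(String[] args) {
    MyValue value = MyValue.newBuilder()
        .setVersion(1)
        .setPayload("hello")
        .putIntToStringMap(1, "one")
        .build();
    byte[] wireBytes = value.toByteArray(); // what the ProtobufFile serde reads from the topic
    System.out.println(wireBytes.length);
  }
}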

View file

@ -1 +0,0 @@
zlFiTJelTOuhnklFwLWixw

Some files were not shown because too many files have changed in this diff.