
merge with master

iliax 2 years ago
parent
commit
b16ec7671f
100 files changed with 3144 additions and 1155 deletions
  1. 286 0
      .editorconfig
  2. 19 11
      .github/ISSUE_TEMPLATE/bug_report.md
  3. 4 0
      .github/release_drafter.yaml
  4. 2 1
      .github/workflows/aws_publisher.yaml
  5. 9 14
      .github/workflows/backend.yml
  6. 10 15
      .github/workflows/branch-deploy.yml
  7. 10 15
      .github/workflows/build-public-image.yml
  8. 10 1
      .github/workflows/codeql-analysis.yml
  9. 28 0
      .github/workflows/create-branch-for-helm.yaml
  10. 10 16
      .github/workflows/cve.yaml
  11. 1 1
      .github/workflows/delete-public-image.yml
  12. 2 2
      .github/workflows/documentation.yaml
  13. 18 22
      .github/workflows/e2e-checks.yaml
  14. 9 15
      .github/workflows/master.yaml
  15. 1 0
      .github/workflows/release-helm.yaml
  16. 30 0
      .github/workflows/release-serde-api.yaml
  17. 17 41
      .github/workflows/release.yaml
  18. 62 1
      .github/workflows/separate_env_public_create.yml
  19. 0 4
      .github/workflows/terraform-deploy.yml
  20. 0 117
      .mvn/wrapper/MavenWrapperDownloader.java
  21. BIN
      .mvn/wrapper/maven-wrapper.jar
  22. 18 2
      .mvn/wrapper/maven-wrapper.properties
  23. 13 0
      README.md
  24. 2 1
      SECURITY.md
  25. 2 2
      charts/kafka-ui/Chart.yaml
  26. 11 1
      charts/kafka-ui/templates/deployment.yaml
  27. 10 1
      charts/kafka-ui/values.yaml
  28. 54 12
      documentation/compose/e2e-tests.yaml
  29. 180 0
      documentation/compose/kafka-ssl-components.yaml
  30. 111 0
      documentation/compose/kafka-ui-serdes.yaml
  31. 1 3
      documentation/compose/kafka-ui.yaml
  32. 11 0
      documentation/compose/proto/key-types.proto
  33. 12 0
      documentation/compose/proto/values.proto
  34. 2 1
      documentation/compose/ssl/generate_certs.sh
  35. BIN
      documentation/compose/ssl/kafka.keystore.jks
  36. BIN
      documentation/compose/ssl/kafka.truststore.jks
  37. 2 0
      documentation/compose/ssl/san.cnf
  38. 123 0
      documentation/guides/DataMasking.md
  39. 21 3
      documentation/guides/Protobuf.md
  40. 169 0
      documentation/guides/Serialization.md
  41. 7 6
      documentation/project/contributing/prerequisites.md
  42. 3 3
      kafka-ui-api/Dockerfile
  43. 3 24
      kafka-ui-api/pom.xml
  44. 0 22
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/client/KafkaConnectClientsFactory.java
  45. 20 47
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/client/RetryingKafkaConnectClient.java
  46. 27 0
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/ClustersProperties.java
  47. 8 0
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/AuthenticatedUser.java
  48. 0 80
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/CognitoOAuthSecurityConfig.java
  49. 44 0
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/OAuthProperties.java
  50. 77 0
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/OAuthPropertiesConverter.java
  51. 101 36
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/OAuthSecurityConfig.java
  52. 30 0
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/RbacOAuth2User.java
  53. 47 0
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/RbacOidcUser.java
  54. 10 0
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/RbacUser.java
  55. 23 0
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/RoleBasedAccessControlProperties.java
  56. 13 0
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/condition/CognitoCondition.java
  57. 18 10
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/logout/CognitoLogoutSuccessHandler.java
  58. 15 0
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/logout/LogoutSuccessHandler.java
  59. 46 0
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/logout/OAuthLogoutSuccessHandler.java
  60. 0 44
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/props/CognitoProperties.java
  61. 80 0
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/AccessController.java
  62. 68 26
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/BrokersController.java
  63. 39 11
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ClustersController.java
  64. 129 71
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ConsumerGroupsController.java
  65. 0 32
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/InfoController.java
  66. 132 34
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/KafkaConnectController.java
  67. 39 13
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/KsqlController.java
  68. 63 18
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java
  69. 146 43
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/SchemasController.java
  70. 2 4
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/StaticController.java
  71. 179 65
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/TopicsController.java
  72. 9 5
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/AbstractEmitter.java
  73. 9 2
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java
  74. 8 11
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ConsumingStats.java
  75. 11 4
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java
  76. 6 3
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/TailingEmitter.java
  77. 2 0
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/ErrorCode.java
  78. 2 2
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/SchemaCompatibilityException.java
  79. 0 12
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/SchemaTypeNotSupportedException.java
  80. 7 81
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/mapper/ClusterMapper.java
  81. 11 7
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/mapper/ConsumerGroupMapper.java
  82. 37 0
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/mapper/KafkaSrMapper.java
  83. 0 59
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/FailoverUrlList.java
  84. 24 0
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalBroker.java
  85. 10 5
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalClusterState.java
  86. 0 14
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalKsqlServer.java
  87. 0 33
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalSchemaRegistry.java
  88. 2 2
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalTopic.java
  89. 16 4
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/KafkaCluster.java
  90. 0 16
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/KafkaConnectCluster.java
  91. 9 4
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/Metrics.java
  92. 134 0
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/rbac/AccessContext.java
  93. 72 0
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/rbac/Permission.java
  94. 21 0
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/rbac/Resource.java
  95. 19 0
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/rbac/Role.java
  96. 24 0
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/rbac/Subject.java
  97. 18 0
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/rbac/permission/ClusterConfigAction.java
  98. 19 0
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/rbac/permission/ConnectAction.java
  99. 20 0
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/rbac/permission/ConsumerGroupAction.java
  100. 15 0
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/rbac/permission/KsqlAction.java

+ 286 - 0
.editorconfig

@@ -0,0 +1,286 @@
+[*]
+charset = utf-8
+end_of_line = lf
+indent_size = 4
+indent_style = space
+insert_final_newline = true
+max_line_length = 120
+tab_width = 4
+ij_continuation_indent_size = 8
+ij_formatter_off_tag = @formatter:off
+ij_formatter_on_tag = @formatter:on
+ij_formatter_tags_enabled = true
+ij_smart_tabs = false
+ij_visual_guides = none
+ij_wrap_on_typing = false
+trim_trailing_whitespace = true
+
+[*.java]
+indent_size = 2
+ij_continuation_indent_size = 4
+ij_java_align_consecutive_assignments = false
+ij_java_align_consecutive_variable_declarations = false
+ij_java_align_group_field_declarations = false
+ij_java_align_multiline_annotation_parameters = false
+ij_java_align_multiline_array_initializer_expression = false
+ij_java_align_multiline_assignment = false
+ij_java_align_multiline_binary_operation = false
+ij_java_align_multiline_chained_methods = false
+ij_java_align_multiline_extends_list = false
+ij_java_align_multiline_for = true
+ij_java_align_multiline_method_parentheses = false
+ij_java_align_multiline_parameters = true
+ij_java_align_multiline_parameters_in_calls = false
+ij_java_align_multiline_parenthesized_expression = false
+ij_java_align_multiline_records = true
+ij_java_align_multiline_resources = true
+ij_java_align_multiline_ternary_operation = false
+ij_java_align_multiline_text_blocks = false
+ij_java_align_multiline_throws_list = false
+ij_java_align_subsequent_simple_methods = false
+ij_java_align_throws_keyword = false
+ij_java_align_types_in_multi_catch = true
+ij_java_annotation_parameter_wrap = off
+ij_java_array_initializer_new_line_after_left_brace = false
+ij_java_array_initializer_right_brace_on_new_line = false
+ij_java_array_initializer_wrap = normal
+ij_java_assert_statement_colon_on_next_line = false
+ij_java_assert_statement_wrap = normal
+ij_java_assignment_wrap = normal
+ij_java_binary_operation_sign_on_next_line = false
+ij_java_binary_operation_wrap = normal
+ij_java_blank_lines_after_anonymous_class_header = 0
+ij_java_blank_lines_after_class_header = 0
+ij_java_blank_lines_after_imports = 1
+ij_java_blank_lines_after_package = 1
+ij_java_blank_lines_around_class = 1
+ij_java_blank_lines_around_field = 0
+ij_java_blank_lines_around_field_in_interface = 0
+ij_java_blank_lines_around_initializer = 1
+ij_java_blank_lines_around_method = 1
+ij_java_blank_lines_around_method_in_interface = 1
+ij_java_blank_lines_before_class_end = 0
+ij_java_blank_lines_before_imports = 1
+ij_java_blank_lines_before_method_body = 0
+ij_java_blank_lines_before_package = 1
+ij_java_block_brace_style = end_of_line
+ij_java_block_comment_add_space = false
+ij_java_block_comment_at_first_column = true
+ij_java_builder_methods = none
+ij_java_call_parameters_new_line_after_left_paren = false
+ij_java_call_parameters_right_paren_on_new_line = false
+ij_java_call_parameters_wrap = normal
+ij_java_case_statement_on_separate_line = true
+ij_java_catch_on_new_line = false
+ij_java_class_annotation_wrap = split_into_lines
+ij_java_class_brace_style = end_of_line
+ij_java_class_count_to_use_import_on_demand = 999
+ij_java_class_names_in_javadoc = 1
+ij_java_do_not_indent_top_level_class_members = false
+ij_java_do_not_wrap_after_single_annotation = false
+ij_java_do_not_wrap_after_single_annotation_in_parameter = false
+ij_java_do_while_brace_force = always
+ij_java_doc_add_blank_line_after_description = true
+ij_java_doc_add_blank_line_after_param_comments = false
+ij_java_doc_add_blank_line_after_return = false
+ij_java_doc_add_p_tag_on_empty_lines = true
+ij_java_doc_align_exception_comments = true
+ij_java_doc_align_param_comments = true
+ij_java_doc_do_not_wrap_if_one_line = false
+ij_java_doc_enable_formatting = true
+ij_java_doc_enable_leading_asterisks = true
+ij_java_doc_indent_on_continuation = false
+ij_java_doc_keep_empty_lines = true
+ij_java_doc_keep_empty_parameter_tag = true
+ij_java_doc_keep_empty_return_tag = true
+ij_java_doc_keep_empty_throws_tag = true
+ij_java_doc_keep_invalid_tags = true
+ij_java_doc_param_description_on_new_line = false
+ij_java_doc_preserve_line_breaks = false
+ij_java_doc_use_throws_not_exception_tag = true
+ij_java_else_on_new_line = false
+ij_java_entity_dd_suffix = EJB
+ij_java_entity_eb_suffix = Bean
+ij_java_entity_hi_suffix = Home
+ij_java_entity_lhi_prefix = Local
+ij_java_entity_lhi_suffix = Home
+ij_java_entity_li_prefix = Local
+ij_java_entity_pk_class = java.lang.String
+ij_java_entity_vo_suffix = VO
+ij_java_enum_constants_wrap = normal
+ij_java_extends_keyword_wrap = normal
+ij_java_extends_list_wrap = normal
+ij_java_field_annotation_wrap = split_into_lines
+ij_java_finally_on_new_line = false
+ij_java_for_brace_force = always
+ij_java_for_statement_new_line_after_left_paren = false
+ij_java_for_statement_right_paren_on_new_line = false
+ij_java_for_statement_wrap = normal
+ij_java_generate_final_locals = false
+ij_java_generate_final_parameters = false
+ij_java_if_brace_force = always
+ij_java_imports_layout = $*,|,*
+ij_java_indent_case_from_switch = true
+ij_java_insert_inner_class_imports = false
+ij_java_insert_override_annotation = true
+ij_java_keep_blank_lines_before_right_brace = 2
+ij_java_keep_blank_lines_between_package_declaration_and_header = 2
+ij_java_keep_blank_lines_in_code = 2
+ij_java_keep_blank_lines_in_declarations = 2
+ij_java_keep_builder_methods_indents = false
+ij_java_keep_control_statement_in_one_line = true
+ij_java_keep_first_column_comment = true
+ij_java_keep_indents_on_empty_lines = false
+ij_java_keep_line_breaks = true
+ij_java_keep_multiple_expressions_in_one_line = false
+ij_java_keep_simple_blocks_in_one_line = false
+ij_java_keep_simple_classes_in_one_line = false
+ij_java_keep_simple_lambdas_in_one_line = false
+ij_java_keep_simple_methods_in_one_line = false
+ij_java_label_indent_absolute = false
+ij_java_label_indent_size = 0
+ij_java_lambda_brace_style = end_of_line
+ij_java_layout_static_imports_separately = true
+ij_java_line_comment_add_space = false
+ij_java_line_comment_add_space_on_reformat = false
+ij_java_line_comment_at_first_column = true
+ij_java_message_dd_suffix = EJB
+ij_java_message_eb_suffix = Bean
+ij_java_method_annotation_wrap = split_into_lines
+ij_java_method_brace_style = end_of_line
+ij_java_method_call_chain_wrap = normal
+ij_java_method_parameters_new_line_after_left_paren = false
+ij_java_method_parameters_right_paren_on_new_line = false
+ij_java_method_parameters_wrap = normal
+ij_java_modifier_list_wrap = false
+ij_java_multi_catch_types_wrap = normal
+ij_java_names_count_to_use_import_on_demand = 999
+ij_java_new_line_after_lparen_in_annotation = false
+ij_java_new_line_after_lparen_in_record_header = false
+ij_java_parameter_annotation_wrap = normal
+ij_java_parentheses_expression_new_line_after_left_paren = false
+ij_java_parentheses_expression_right_paren_on_new_line = false
+ij_java_place_assignment_sign_on_next_line = false
+ij_java_prefer_longer_names = true
+ij_java_prefer_parameters_wrap = false
+ij_java_record_components_wrap = normal
+ij_java_repeat_synchronized = true
+ij_java_replace_instanceof_and_cast = false
+ij_java_replace_null_check = true
+ij_java_replace_sum_lambda_with_method_ref = true
+ij_java_resource_list_new_line_after_left_paren = false
+ij_java_resource_list_right_paren_on_new_line = false
+ij_java_resource_list_wrap = normal
+ij_java_rparen_on_new_line_in_annotation = false
+ij_java_rparen_on_new_line_in_record_header = false
+ij_java_session_dd_suffix = EJB
+ij_java_session_eb_suffix = Bean
+ij_java_session_hi_suffix = Home
+ij_java_session_lhi_prefix = Local
+ij_java_session_lhi_suffix = Home
+ij_java_session_li_prefix = Local
+ij_java_session_si_suffix = Service
+ij_java_space_after_closing_angle_bracket_in_type_argument = false
+ij_java_space_after_colon = true
+ij_java_space_after_comma = true
+ij_java_space_after_comma_in_type_arguments = true
+ij_java_space_after_for_semicolon = true
+ij_java_space_after_quest = true
+ij_java_space_after_type_cast = true
+ij_java_space_before_annotation_array_initializer_left_brace = false
+ij_java_space_before_annotation_parameter_list = false
+ij_java_space_before_array_initializer_left_brace = true
+ij_java_space_before_catch_keyword = true
+ij_java_space_before_catch_left_brace = true
+ij_java_space_before_catch_parentheses = true
+ij_java_space_before_class_left_brace = true
+ij_java_space_before_colon = true
+ij_java_space_before_colon_in_foreach = true
+ij_java_space_before_comma = false
+ij_java_space_before_do_left_brace = true
+ij_java_space_before_else_keyword = true
+ij_java_space_before_else_left_brace = true
+ij_java_space_before_finally_keyword = true
+ij_java_space_before_finally_left_brace = true
+ij_java_space_before_for_left_brace = true
+ij_java_space_before_for_parentheses = true
+ij_java_space_before_for_semicolon = false
+ij_java_space_before_if_left_brace = true
+ij_java_space_before_if_parentheses = true
+ij_java_space_before_method_call_parentheses = false
+ij_java_space_before_method_left_brace = true
+ij_java_space_before_method_parentheses = false
+ij_java_space_before_opening_angle_bracket_in_type_parameter = false
+ij_java_space_before_quest = true
+ij_java_space_before_switch_left_brace = true
+ij_java_space_before_switch_parentheses = true
+ij_java_space_before_synchronized_left_brace = true
+ij_java_space_before_synchronized_parentheses = true
+ij_java_space_before_try_left_brace = true
+ij_java_space_before_try_parentheses = true
+ij_java_space_before_type_parameter_list = false
+ij_java_space_before_while_keyword = true
+ij_java_space_before_while_left_brace = true
+ij_java_space_before_while_parentheses = true
+ij_java_space_inside_one_line_enum_braces = false
+ij_java_space_within_empty_array_initializer_braces = false
+ij_java_space_within_empty_method_call_parentheses = false
+ij_java_space_within_empty_method_parentheses = false
+ij_java_spaces_around_additive_operators = true
+ij_java_spaces_around_annotation_eq = true
+ij_java_spaces_around_assignment_operators = true
+ij_java_spaces_around_bitwise_operators = true
+ij_java_spaces_around_equality_operators = true
+ij_java_spaces_around_lambda_arrow = true
+ij_java_spaces_around_logical_operators = true
+ij_java_spaces_around_method_ref_dbl_colon = false
+ij_java_spaces_around_multiplicative_operators = true
+ij_java_spaces_around_relational_operators = true
+ij_java_spaces_around_shift_operators = true
+ij_java_spaces_around_type_bounds_in_type_parameters = true
+ij_java_spaces_around_unary_operator = false
+ij_java_spaces_within_angle_brackets = false
+ij_java_spaces_within_annotation_parentheses = false
+ij_java_spaces_within_array_initializer_braces = false
+ij_java_spaces_within_braces = false
+ij_java_spaces_within_brackets = false
+ij_java_spaces_within_cast_parentheses = false
+ij_java_spaces_within_catch_parentheses = false
+ij_java_spaces_within_for_parentheses = false
+ij_java_spaces_within_if_parentheses = false
+ij_java_spaces_within_method_call_parentheses = false
+ij_java_spaces_within_method_parentheses = false
+ij_java_spaces_within_parentheses = false
+ij_java_spaces_within_record_header = false
+ij_java_spaces_within_switch_parentheses = false
+ij_java_spaces_within_synchronized_parentheses = false
+ij_java_spaces_within_try_parentheses = false
+ij_java_spaces_within_while_parentheses = false
+ij_java_special_else_if_treatment = true
+ij_java_subclass_name_suffix = Impl
+ij_java_ternary_operation_signs_on_next_line = false
+ij_java_ternary_operation_wrap = normal
+ij_java_test_name_suffix = Test
+ij_java_throws_keyword_wrap = normal
+ij_java_throws_list_wrap = normal
+ij_java_use_external_annotations = false
+ij_java_use_fq_class_names = false
+ij_java_use_relative_indents = false
+ij_java_use_single_class_imports = true
+ij_java_variable_annotation_wrap = normal
+ij_java_visibility = public
+ij_java_while_brace_force = always
+ij_java_while_on_new_line = false
+ij_java_wrap_comments = false
+ij_java_wrap_first_method_in_call_chain = false
+ij_java_wrap_long_lines = false
+
+[*.md]
+insert_final_newline = false
+trim_trailing_whitespace = false
+
+[*.yaml]
+indent_size = 2
+[*.yml]
+indent_size = 2
+

+ 19 - 11
.github/ISSUE_TEMPLATE/bug_report.md

@@ -15,33 +15,36 @@ https://github.com/provectus/kafka-ui/discussions
 
 
 -->
 
 
-**Describe the bug**
-<!--(A clear and concise description of what the bug is.)-->
+<!--
+Please follow the naming conventions for bugs:
+<Feature/Area/Scope> :  <Compact, but specific problem summary> 
+Avoid generic titles, like “Topics: incorrect layout of message sorting drop-down list”. Better use something like: “Topics: Message sorting drop-down list overlaps the "Submit" button”.
+
+-->
 
 
+**Describe the bug** (Actual behavior)
+<!--(A clear and concise description of what the bug is.Use a list, if there is more than one problem)-->
+
+**Expected behavior**
+<!--(A clear and concise description of what you expected to happen.)-->
 
 
 **Set up**
 <!--
+WE MIGHT CLOSE THE ISSUE without further explanation IF YOU DON'T PROVIDE THIS INFORMATION.
+
 How do you run the app? Please provide as much info as possible:
 1. App version (docker image version or check commit hash in the top left corner in UI)
 2. Helm chart version, if you use one
 3. Any IAAC configs
-
-We might close the issue without further explanation if you don't provide such information.
 -->
 
 
 
 
 **Steps to Reproduce**
 <!-- We'd like you to provide an example setup (via docker-compose, helm, etc.) 
 to reproduce the problem, especially with a complex setups. -->
-Steps to reproduce the behavior:
 
 
 1. 
 
 
-**Expected behavior**
-<!--
-(A clear and concise description of what you expected to happen)
--->
-
 **Screenshots**
 <!--
 (If applicable, add screenshots to help explain your problem)
@@ -50,5 +53,10 @@ Steps to reproduce the behavior:
 
 
 **Additional context**
 <!--
-(Add any other context about the problem here)
+Add any other context about the problem here. E.g.: 
+1. Are there any alternative scenarios (different data/methods/configuration/setup) you have tried? 
+   Were they successfull or same issue occured? Please provide steps as well.
+2. Related issues (if there are any).
+3. Logs (if available)
+4. Is there any serious impact or behaviour on the end-user because of this issue, that can be overlooked?
 -->

+ 4 - 0
.github/release_drafter.yaml

@@ -9,6 +9,7 @@ template: |
 exclude-labels:
   - 'scope/infrastructure'
   - 'scope/QA'
+  - 'scope/AQA'
   - 'type/dependencies'
   - 'type/chore'
   - 'type/documentation'
@@ -24,6 +25,9 @@ categories:
   - title: '🔨Bug Fixes'
     labels:
       - 'type/bug'
+  - title: 'Security'
+    labels:
+      - 'type/security'
   - title: '⎈ Helm/K8S Changes'
     labels:
       - 'scope/k8s'

+ 2 - 1
.github/workflows/aws_publisher.yaml

@@ -14,6 +14,7 @@ on:
         description: 'If set to true, the request to update AWS Server product version will be raised'
         required: true
         default: false
+        type: boolean
 
 
 jobs:
   build-ami:
@@ -57,7 +58,7 @@ jobs:
 
 
       # add fresh AMI to AWS Marketplace
       - name: Publish Artifact at Marketplace
-        if: ${{ github.event.inputs.PublishOnMarketplace == true }}
+        if: ${{ github.event.inputs.PublishOnMarketplace == 'true' }}
         env:
           PRODUCT_ID: ${{ secrets.AWS_SERVER_PRODUCT_ID }}
           RELEASE_VERSION: "${{ github.event.inputs.KafkaUIReleaseVersion }}"

+ 9 - 14
.github/workflows/backend.yml

@@ -16,17 +16,12 @@ jobs:
         with:
           fetch-depth: 0
           ref: ${{ github.event.pull_request.head.sha }}
-      - name: Cache local Maven repository
-        uses: actions/cache@v3
-        with:
-          path: ~/.m2/repository
-          key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}
-          restore-keys: |
-            ${{ runner.os }}-maven-
-      - name: Set up JDK 1.13
-        uses: actions/setup-java@v1
+      - name: Set up JDK
+        uses: actions/setup-java@v3
         with:
-          java-version: 1.13
+          java-version: '17'
+          distribution: 'zulu'
+          cache: 'maven'
       - name: Cache SonarCloud packages
         uses: actions/cache@v3
         with:
@@ -41,8 +36,8 @@ jobs:
           HEAD_REF: ${{ github.head_ref }}
           BASE_REF: ${{ github.base_ref }}
         run: |
-          mvn versions:set -DnewVersion=${{ github.event.pull_request.head.sha }}
-          mvn -B verify org.sonarsource.scanner.maven:sonar-maven-plugin:sonar \
+          ./mvnw -B -ntp versions:set -DnewVersion=${{ github.event.pull_request.head.sha }}
+          ./mvnw -B -V -ntp verify org.sonarsource.scanner.maven:sonar-maven-plugin:sonar \
           -Dsonar.projectKey=com.provectus:kafka-ui_backend \
           -Dsonar.pullrequest.key=${{ github.event.pull_request.number }} \
           -Dsonar.pullrequest.branch=$HEAD_REF \
@@ -53,6 +48,6 @@ jobs:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           SONAR_TOKEN: ${{ secrets.SONAR_TOKEN_BACKEND }}
         run: |
-          mvn versions:set -DnewVersion=$GITHUB_SHA
-          mvn -B verify org.sonarsource.scanner.maven:sonar-maven-plugin:sonar \
+          ./mvnw -B -ntp versions:set -DnewVersion=$GITHUB_SHA
+          ./mvnw -B -V -ntp verify org.sonarsource.scanner.maven:sonar-maven-plugin:sonar \
           -Dsonar.projectKey=com.provectus:kafka-ui_backend

+ 10 - 15
.github/workflows/branch-deploy.yml

@@ -14,27 +14,22 @@ jobs:
         id: extract_branch
         run: |
           tag='pr${{ github.event.pull_request.number }}'
-          echo ::set-output name=tag::${tag}
+          echo "tag=${tag}" >> $GITHUB_OUTPUT
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-      - name: Cache local Maven repository
-        uses: actions/cache@v3
-        with:
-          path: ~/.m2/repository
-          key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}
-          restore-keys: |
-            ${{ runner.os }}-maven-
-      - name: Set up JDK 1.13
-        uses: actions/setup-java@v1
+      - name: Set up JDK
+        uses: actions/setup-java@v3
         with:
-          java-version: 1.13
+          java-version: '17'
+          distribution: 'zulu'
+          cache: 'maven'
       - name: Build
         id: build
         run: |
-          mvn versions:set -DnewVersion=$GITHUB_SHA
-          mvn clean package -Pprod -DskipTests
-          export VERSION=$(mvn -q -Dexec.executable=echo -Dexec.args='${project.version}' --non-recursive exec:exec)
-          echo "::set-output name=version::${VERSION}"
+          ./mvnw -B -ntp versions:set -DnewVersion=$GITHUB_SHA
+          ./mvnw -B -V -ntp clean package -Pprod -DskipTests
+          export VERSION=$(./mvnw -q -Dexec.executable=echo -Dexec.args='${project.version}' --non-recursive exec:exec)
+          echo "version=${VERSION}" >> $GITHUB_OUTPUT
       - name: Set up QEMU
         uses: docker/setup-qemu-action@v2
       - name: Set up Docker Buildx

+ 10 - 15
.github/workflows/build-public-image.yml

@@ -13,25 +13,20 @@ jobs:
         id: extract_branch
         run: |
           tag='${{ github.event.pull_request.number }}'
-          echo ::set-output name=tag::${tag}
-      - name: Cache local Maven repository
-        uses: actions/cache@v3
-        with:
-          path: ~/.m2/repository
-          key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}
-          restore-keys: |
-            ${{ runner.os }}-maven-
-      - name: Set up JDK 1.13
-        uses: actions/setup-java@v1
+          echo "tag=${tag}" >> $GITHUB_OUTPUT
+      - name: Set up JDK
+        uses: actions/setup-java@v3
         with:
-          java-version: 1.13
+          java-version: '17'
+          distribution: 'zulu'
+          cache: 'maven'
       - name: Build
         id: build
         run: |
-          mvn versions:set -DnewVersion=$GITHUB_SHA
-          mvn clean package -Pprod -DskipTests
-          export VERSION=$(mvn -q -Dexec.executable=echo -Dexec.args='${project.version}' --non-recursive exec:exec)
-          echo "::set-output name=version::${VERSION}"
+          ./mvnw -B -ntp versions:set -DnewVersion=$GITHUB_SHA
+          ./mvnw -B -V -ntp clean package -Pprod -DskipTests
+          export VERSION=$(./mvnw -q -Dexec.executable=echo -Dexec.args='${project.version}' --non-recursive exec:exec)
+          echo "version=${VERSION}" >> $GITHUB_OUTPUT
       - name: Set up QEMU
         uses: docker/setup-qemu-action@v2
       - name: Set up Docker Buildx

+ 10 - 1
.github/workflows/codeql-analysis.yml

@@ -20,6 +20,8 @@ on:
     paths:
     - 'kafka-ui-contract/**'
     - 'kafka-ui-react-app/**'
+    - 'kafka-ui-api/**'
+    - 'kafka-ui-serde-api/**'
   schedule:
     - cron: '39 15 * * 6'
 
 
@@ -31,7 +33,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        language: [ 'javascript' ]
+        language: [ 'javascript', 'java' ]
         # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
         # Learn more:
         # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
@@ -50,6 +52,13 @@ jobs:
         # Prefix the list here with "+" to use these queries and those in the config file.
         # queries: ./path/to/local/query, your-org/your-repo/queries@main
 
 
+    - name: Set up JDK
+      uses: actions/setup-java@v3
+      with:
+          java-version: '17'
+          distribution: 'zulu'
+          cache: 'maven'
+
     # Autobuild attempts to build any compiled languages  (C/C++, C#, or Java).
     # If this step fails, then you should remove it and run the build manually (see below)
     - name: Autobuild

+ 28 - 0
.github/workflows/create-branch-for-helm.yaml

@@ -0,0 +1,28 @@
+name: prepare-helm-release
+on:
+  repository_dispatch:
+    types: [prepare-helm-release]
+jobs:
+  change-app-version:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - run: |
+          git config user.name github-actions
+          git config user.email github-actions@github.com
+      - name: Change versions
+        run: |
+          git checkout -b release-${{ github.event.client_payload.appversion}}
+          version=$(cat charts/kafka-ui/Chart.yaml  | grep version | awk '{print $2}')
+          version=${version%.*}.$((${version##*.}+1))
+          sed -i "s/version:.*/version: ${version}/" charts/kafka-ui/Chart.yaml
+          sed -i "s/appVersion:.*/appVersion: ${{ github.event.client_payload.appversion}}/" charts/kafka-ui/Chart.yaml
+          git add  charts/kafka-ui/Chart.yaml
+          git commit -m "release ${version}"
+          git push --set-upstream origin release-${{ github.event.client_payload.appversion}}
+      - name: Slack Notification
+        uses: rtCamp/action-slack-notify@v2
+        env:
+          SLACK_TITLE: "release-${{ github.event.client_payload.appversion}}"
+          SLACK_MESSAGE: "A new release of the helm chart has been prepared. Branch name: release-${{ github.event.client_payload.appversion}}"
+          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}

+ 10 - 16
.github/workflows/cve.yaml

@@ -10,26 +10,20 @@ jobs:
     steps:
       - uses: actions/checkout@v3
 
 
-      - name: Cache local Maven repository
-        uses: actions/cache@v3
-        with:
-          path: ~/.m2/repository
-          key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}
-          restore-keys: |
-            ${{ runner.os }}-maven-
-
-      - name: Set up JDK 1.13
-        uses: actions/setup-java@v1
+      - name: Set up JDK
+        uses: actions/setup-java@v3
         with:
-          java-version: 1.13
+          java-version: '17'
+          distribution: 'zulu'
+          cache: 'maven'
 
 
       - name: Build project
         id: build
         run: |
-          mvn versions:set -DnewVersion=$GITHUB_SHA
-          mvn clean package -DskipTests
-          export VERSION=$(mvn -q -Dexec.executable=echo -Dexec.args='${project.version}' --non-recursive exec:exec)
-          echo "::set-output name=version::${VERSION}"
+          ./mvnw -B -ntp versions:set -DnewVersion=$GITHUB_SHA
+          ./mvnw -B -V -ntp clean package -DskipTests
+          export VERSION=$(./mvnw -q -Dexec.executable=echo -Dexec.args='${project.version}' --non-recursive exec:exec)
+          echo "version=${VERSION}" >> $GITHUB_OUTPUT
 
 
       - name: Set up QEMU
         uses: docker/setup-qemu-action@v2
@@ -61,7 +55,7 @@ jobs:
           cache-to: type=local,dest=/tmp/.buildx-cache
 
 
       - name: Run CVE checks
-        uses: aquasecurity/trivy-action@0.7.1
+        uses: aquasecurity/trivy-action@0.8.0
         with:
           image-ref: "provectuslabs/kafka-ui:${{ steps.build.outputs.version }}"
           format: "table"

+ 1 - 1
.github/workflows/delete-public-image.yml

@@ -13,7 +13,7 @@ jobs:
         run: |
           echo
           tag='${{ github.event.pull_request.number }}'
-          echo ::set-output name=tag::${tag}
+          echo "tag=${tag}" >> $GITHUB_OUTPUT
       - name: Configure AWS credentials for Kafka-UI account
         uses: aws-actions/configure-aws-credentials@v1
         with:

+ 2 - 2
.github/workflows/documentation.yaml

@@ -15,9 +15,9 @@ jobs:
     steps:
       - uses: actions/checkout@v3
       - name: Check URLs in files
-        uses: urlstechie/urlchecker-action@0.0.33
+        uses: urlstechie/urlchecker-action@0.0.34
         with:
           exclude_patterns: localhost,127.0.,192.168.
-          exclude_urls: https://api.server,https://graph.microsoft.com/User.Read,https://dev-a63ggcut.auth0.com/
+          exclude_urls: https://api.server,https://graph.microsoft.com/User.Read,https://dev-a63ggcut.auth0.com/,http://main-schema-registry:8081,http://schema-registry:8081,http://another-yet-schema-registry:8081,http://another-schema-registry:8081
           print_all: false
           file_types: .md

+ 18 - 22
.github/workflows/e2e-checks.yaml

@@ -15,13 +15,6 @@ jobs:
       - uses: actions/checkout@v3
         with:
           ref: ${{ github.event.pull_request.head.sha }}
-      - name: Cache local Maven repository
-        uses: actions/cache@v3
-        with:
-          path: ~/.m2/repository
-          key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}
-          restore-keys: |
-            ${{ runner.os }}-maven-
       - name: Set the values
         id: set_env_values
         run: |
@@ -30,15 +23,17 @@ jobs:
         id: pull_chrome
         run: |
           docker pull selenium/standalone-chrome:103.0
-      - name: Set up JDK 1.13
-        uses: actions/setup-java@v1
+      - name: Set up JDK
+        uses: actions/setup-java@v3
         with:
-          java-version: 1.13
+          java-version: '17'
+          distribution: 'zulu'
+          cache: 'maven'
       - name: Build with Maven
         id: build_app
         run: |
-          mvn versions:set -DnewVersion=${{ github.event.pull_request.head.sha }}
-          mvn clean package -Pprod -Dmaven.test.skip=true ${{ github.event.inputs.extraMavenOptions }}
+          ./mvnw -B -ntp versions:set -DnewVersion=${{ github.event.pull_request.head.sha }}
+          ./mvnw -B -V -ntp clean package -Pprod -Dmaven.test.skip=true ${{ github.event.inputs.extraMavenOptions }}
       - name: compose app
         id: compose_app
         # use the following command until #819 will be fixed
@@ -46,8 +41,8 @@ jobs:
           docker-compose -f ./documentation/compose/e2e-tests.yaml up -d
       - name: e2e run
         run: |
-          mvn versions:set -DnewVersion=${{ github.event.pull_request.head.sha }}
-          mvn -DQASEIO_API_TOKEN=${{ secrets.QASEIO_API_TOKEN }} -pl '!kafka-ui-api' test -Pprod
+          ./mvnw -B -ntp versions:set -DnewVersion=${{ github.event.pull_request.head.sha }}
+          ./mvnw -B -V -ntp -DQASEIO_API_TOKEN=${{ secrets.QASEIO_API_TOKEN }} -pl '!kafka-ui-api' test -Pprod
       - name: Generate allure report
         uses: simple-elf/allure-report-action@master
         if: always()
@@ -57,14 +52,15 @@ jobs:
           gh_pages: allure-results
           allure_report: allure-report
           subfolder: allure-results
-      - name: Deploy allure report to Github Pages
+          report_url: "http://kafkaui-allure-reports.s3-website.eu-central-1.amazonaws.com"
+      - uses: jakejarvis/s3-sync-action@master
         if: always()
-        uses: peaceiris/actions-gh-pages@v3
-        with:
-          github_token: ${{ secrets.GITHUB_TOKEN }}
-          publish_dir: allure-history
-          publish_branch: gh-pages
-          destination_dir: ./allure
+        env:
+          AWS_S3_BUCKET: 'kafkaui-allure-reports'
+          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+          AWS_REGION: 'eu-central-1'
+          SOURCE_DIR: 'allure-history/allure-results'
       - name: Post the link to allure report
         if: always()
         uses: Sibz/github-status-action@v1.1.6
@@ -73,7 +69,7 @@ jobs:
           context: "Test report"
           state: "success"
           sha: ${{ github.event.pull_request.head.sha  || github.sha }}
-          target_url: https://${{ github.repository_owner }}.github.io/kafka-ui/allure/allure-results/${{ github.run_number }}
+          target_url: http://kafkaui-allure-reports.s3-website.eu-central-1.amazonaws.com/${{ github.run_number }}
       - name: Dump docker logs on failure
         if: failure()
         uses: jwalton/gh-docker-logs@v2.2.1

+ 9 - 15
.github/workflows/master.yaml

@@ -10,26 +10,20 @@ jobs:
     steps:
       - uses: actions/checkout@v3
 
 
-      - name: Cache local Maven repository
-        uses: actions/cache@v3
-        with:
-          path: ~/.m2/repository
-          key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}
-          restore-keys: |
-            ${{ runner.os }}-maven-
-
-      - name: Set up JDK 1.13
-        uses: actions/setup-java@v1
+      - name: Set up JDK
+        uses: actions/setup-java@v3
         with:
-          java-version: 1.13
+          java-version: '17'
+          distribution: 'zulu'
+          cache: 'maven'
 
 
       - name: Build
         id: build
         run: |
-          mvn versions:set -DnewVersion=$GITHUB_SHA
-          mvn clean package -Pprod -DskipTests
-          export VERSION=$(mvn -q -Dexec.executable=echo -Dexec.args='${project.version}' --non-recursive exec:exec)
-          echo "::set-output name=version::${VERSION}"
+          ./mvnw -B -ntp versions:set -DnewVersion=$GITHUB_SHA
+          ./mvnw -V -B -ntp clean package -Pprod -DskipTests
+          export VERSION=$(./mvnw -q -Dexec.executable=echo -Dexec.args='${project.version}' --non-recursive exec:exec)
+          echo "version=${VERSION}" >> $GITHUB_OUTPUT
 #################
 #               #
 # Docker images #

+ 1 - 0
.github/workflows/release-helm.yaml

@@ -29,6 +29,7 @@ jobs:
           git fetch origin
           git stash
           git checkout -b gh-pages origin/gh-pages
+          git pull
           helm repo index .
           git add -f ${MSG##*/} index.yaml
           git commit -m "release ${VERSION}"

+ 30 - 0
.github/workflows/release-serde-api.yaml

@@ -0,0 +1,30 @@
+name: Release-serde-api
+on: workflow_dispatch
+
+jobs:
+  release-serde-api:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+
+      - run: |
+          git config user.name github-actions
+          git config user.email github-actions@github.com
+
+      - name: Set up JDK
+        uses: actions/setup-java@v3
+        with:
+          java-version: "17"
+          distribution: "zulu"
+          cache: "maven"
+
+      - id: install-secret-key
+        name: Install GPG secret key
+        run: |
+          cat <(echo -e "${{ secrets.GPG_PRIVATE_KEY }}") | gpg --batch --import
+
+      - name: Publish to Maven Central
+        run: |
+          mvn source:jar  javadoc:jar  package  gpg:sign -Dgpg.passphrase=${{ secrets.GPG_PASSPHRASE }} -Dserver.username=${{ secrets.NEXUS_USERNAME }} -Dserver.password=${{ secrets.NEXUS_PASSWORD }} nexus-staging:deploy   -pl kafka-ui-serde-api  -s settings.xml

+ 17 - 41
.github/workflows/release.yaml

@@ -17,26 +17,20 @@ jobs:
           git config user.name github-actions
           git config user.email github-actions@github.com
 
 
-      - name: Cache local Maven repository
-        uses: actions/cache@v3
-        with:
-          path: ~/.m2/repository
-          key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}
-          restore-keys: |
-            ${{ runner.os }}-maven-
-
-      - name: Set up JDK 1.13
-        uses: actions/setup-java@v1
+      - name: Set up JDK
+        uses: actions/setup-java@v3
         with:
-          java-version: 1.13
+          java-version: '17'
+          distribution: 'zulu'
+          cache: 'maven'
 
 
       - name: Build with Maven
         id: build
         run: |
-          mvn versions:set -DnewVersion=${{ github.event.release.tag_name }}
-          mvn clean package -Pprod -DskipTests
-          export VERSION=$(mvn -q -Dexec.executable=echo -Dexec.args='${project.version}' --non-recursive exec:exec)
-          echo ::set-output name=version::${VERSION}
+          ./mvnw -B -ntp versions:set -DnewVersion=${{ github.event.release.tag_name }}
+          ./mvnw -B -V -ntp clean package -Pprod -DskipTests
+          export VERSION=$(./mvnw -q -Dexec.executable=echo -Dexec.args='${project.version}' --non-recursive exec:exec)
+          echo "version=${VERSION}" >> $GITHUB_OUTPUT
 
 
       - name: Upload files to a GitHub release
         uses: svenstaro/upload-release-action@2.3.0
@@ -93,32 +87,14 @@ jobs:
 
 
   charts:
     runs-on: ubuntu-latest
+    permissions:
+      contents: write
     needs: release
     steps:
-      - uses: actions/checkout@v3
+      - name: Repository Dispatch
+        uses: peter-evans/repository-dispatch@v2
         with:
-          fetch-depth: 1
-
-      - run: |
-          git config user.name github-actions
-          git config user.email github-actions@github.com
-
-      - uses: azure/setup-helm@v1
-
-      - name: update chart version
-        run: |
-          export version=${{needs.release.outputs.version}}
-          sed -i "s/version:.*/version: ${version}/" charts/kafka-ui/Chart.yaml
-          sed -i "s/appVersion:.*/appVersion: ${version}/" charts/kafka-ui/Chart.yaml
-
-      - name: add chart
-        run: |
-          export VERSION=${{needs.release.outputs.version}}
-          MSG=$(helm package --app-version ${VERSION} charts/kafka-ui)
-          git fetch origin
-          git stash
-          git checkout -b gh-pages origin/gh-pages
-          helm repo index .
-          git add -f ${MSG##*/} index.yaml
-          git commit -m "release ${VERSION}"
-          git push
+          token: ${{ secrets.GITHUB_TOKEN }}
+          repository: provectus/kafka-ui
+          event-type: prepare-helm-release
+          client-payload: '{"appversion": "${{ needs.release.outputs.version }}"}'

+ 62 - 1
.github/workflows/separate_env_public_create.yml

@@ -8,8 +8,69 @@ on:
         default: 'demo'
 
 
 jobs:
+  build:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - name: get branch name
+        id: extract_branch
+        run: |
+          tag="${{ github.event.inputs.ENV_NAME }}-$(date '+%F-%H-%M-%S')"
+          echo "tag=${tag}" >> $GITHUB_OUTPUT
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+      - name: Set up JDK
+        uses: actions/setup-java@v3
+        with:
+          java-version: '17'
+          distribution: 'zulu'
+          cache: 'maven'
+      - name: Build
+        id: build
+        run: |
+          ./mvnw -B -ntp versions:set -DnewVersion=$GITHUB_SHA
+          ./mvnw -B -V -ntp clean package -Pprod -DskipTests
+          export VERSION=$(./mvnw -q -Dexec.executable=echo -Dexec.args='${project.version}' --non-recursive exec:exec)
+          echo "version=${VERSION}" >> $GITHUB_OUTPUT
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v2
+      - name: Set up Docker Buildx
+        id: buildx
+        uses: docker/setup-buildx-action@v2
+      - name: Cache Docker layers
+        uses: actions/cache@v3
+        with:
+          path: /tmp/.buildx-cache
+          key: ${{ runner.os }}-buildx-${{ github.sha }}
+          restore-keys: |
+            ${{ runner.os }}-buildx-
+      - name: Configure AWS credentials for Kafka-UI account
+        uses: aws-actions/configure-aws-credentials@v1
+        with:
+          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+          aws-region: eu-central-1
+      - name: Login to Amazon ECR
+        id: login-ecr
+        uses: aws-actions/amazon-ecr-login@v1
+      - name: Build and push
+        id: docker_build_and_push
+        uses: docker/build-push-action@v3
+        with:
+          builder: ${{ steps.buildx.outputs.name }}
+          context: kafka-ui-api
+          push: true
+          tags: 297478128798.dkr.ecr.eu-central-1.amazonaws.com/kafka-ui:${{ steps.extract_branch.outputs.tag }}
+          build-args: |
+            JAR_FILE=kafka-ui-api-${{ steps.build.outputs.version }}.jar
+          cache-from: type=local,src=/tmp/.buildx-cache
+          cache-to: type=local,dest=/tmp/.buildx-cache
+    outputs:
+      tag: ${{ steps.extract_branch.outputs.tag }}
+
   separate-env-create:
     runs-on: ubuntu-latest
+    needs: build
     steps:
       - name: clone
         run: |
@@ -18,7 +79,7 @@ jobs:
       - name: separate env create
         run: |
           cd kafka-ui-infra/aws-infrastructure4eks/argocd/scripts
-          bash separate_env_create.sh ${{ github.event.inputs.ENV_NAME }} ${{ secrets.FEATURE_TESTING_UI_PASSWORD }}
+          bash separate_env_create.sh ${{ github.event.inputs.ENV_NAME }} ${{ secrets.FEATURE_TESTING_UI_PASSWORD }} ${{ needs.build.outputs.tag }}
           git config --global user.email "kafka-ui-infra@provectus.com"
           git config --global user.name "kafka-ui-infra"
           git add -A

+ 0 - 4
.github/workflows/terraform-deploy.yml

@@ -35,10 +35,6 @@ jobs:
       - name: Terraform Install
         uses: hashicorp/setup-terraform@v2
 
 
-      - name: Terraform format
-        id: fmt
-        run: cd $TF_DIR && terraform fmt -check
-
       - name: Terraform init
         id: init
         run: cd $TF_DIR && terraform init --backend-config="../envs/pro/terraform-backend.tfvars"

+ 0 - 117
.mvn/wrapper/MavenWrapperDownloader.java

@@ -1,117 +0,0 @@
-/*
- * Copyright 2007-present the original author or authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-import java.net.*;
-import java.io.*;
-import java.nio.channels.*;
-import java.util.Properties;
-
-public class MavenWrapperDownloader {
-
-    private static final String WRAPPER_VERSION = "0.5.6";
-    /**
-     * Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided.
-     */
-    private static final String DEFAULT_DOWNLOAD_URL = "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/"
-        + WRAPPER_VERSION + "/maven-wrapper-" + WRAPPER_VERSION + ".jar";
-
-    /**
-     * Path to the maven-wrapper.properties file, which might contain a downloadUrl property to
-     * use instead of the default one.
-     */
-    private static final String MAVEN_WRAPPER_PROPERTIES_PATH =
-            ".mvn/wrapper/maven-wrapper.properties";
-
-    /**
-     * Path where the maven-wrapper.jar will be saved to.
-     */
-    private static final String MAVEN_WRAPPER_JAR_PATH =
-            ".mvn/wrapper/maven-wrapper.jar";
-
-    /**
-     * Name of the property which should be used to override the default download url for the wrapper.
-     */
-    private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl";
-
-    public static void main(String args[]) {
-        System.out.println("- Downloader started");
-        File baseDirectory = new File(args[0]);
-        System.out.println("- Using base directory: " + baseDirectory.getAbsolutePath());
-
-        // If the maven-wrapper.properties exists, read it and check if it contains a custom
-        // wrapperUrl parameter.
-        File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH);
-        String url = DEFAULT_DOWNLOAD_URL;
-        if(mavenWrapperPropertyFile.exists()) {
-            FileInputStream mavenWrapperPropertyFileInputStream = null;
-            try {
-                mavenWrapperPropertyFileInputStream = new FileInputStream(mavenWrapperPropertyFile);
-                Properties mavenWrapperProperties = new Properties();
-                mavenWrapperProperties.load(mavenWrapperPropertyFileInputStream);
-                url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url);
-            } catch (IOException e) {
-                System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'");
-            } finally {
-                try {
-                    if(mavenWrapperPropertyFileInputStream != null) {
-                        mavenWrapperPropertyFileInputStream.close();
-                    }
-                } catch (IOException e) {
-                    // Ignore ...
-                }
-            }
-        }
-        System.out.println("- Downloading from: " + url);
-
-        File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH);
-        if(!outputFile.getParentFile().exists()) {
-            if(!outputFile.getParentFile().mkdirs()) {
-                System.out.println(
-                        "- ERROR creating output directory '" + outputFile.getParentFile().getAbsolutePath() + "'");
-            }
-        }
-        System.out.println("- Downloading to: " + outputFile.getAbsolutePath());
-        try {
-            downloadFileFromURL(url, outputFile);
-            System.out.println("Done");
-            System.exit(0);
-        } catch (Throwable e) {
-            System.out.println("- Error downloading");
-            e.printStackTrace();
-            System.exit(1);
-        }
-    }
-
-    private static void downloadFileFromURL(String urlString, File destination) throws Exception {
-        if (System.getenv("MVNW_USERNAME") != null && System.getenv("MVNW_PASSWORD") != null) {
-            String username = System.getenv("MVNW_USERNAME");
-            char[] password = System.getenv("MVNW_PASSWORD").toCharArray();
-            Authenticator.setDefault(new Authenticator() {
-                @Override
-                protected PasswordAuthentication getPasswordAuthentication() {
-                    return new PasswordAuthentication(username, password);
-                }
-            });
-        }
-        URL website = new URL(urlString);
-        ReadableByteChannel rbc;
-        rbc = Channels.newChannel(website.openStream());
-        FileOutputStream fos = new FileOutputStream(destination);
-        fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE);
-        fos.close();
-        rbc.close();
-    }
-
-}

BIN
.mvn/wrapper/maven-wrapper.jar


+ 18 - 2
.mvn/wrapper/maven-wrapper.properties

@@ -1,2 +1,18 @@
-distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.6.3/apache-maven-3.6.3-bin.zip
-wrapperUrl=https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.8.6/apache-maven-3.8.6-bin.zip
+wrapperUrl=https://repo.maven.apache.org/maven2/org/apache/maven/wrapper/maven-wrapper/3.1.1/maven-wrapper-3.1.1.jar
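If the wrapper files ever need to be regenerated locally after this bump, the Maven Wrapper plugin can do it; a minimal sketch, assuming a locally installed Maven with access to Maven Central:
```sh
# re-create .mvn/wrapper/* pinned to Maven 3.8.6 (the plugin pulls in a matching wrapper jar)
mvn wrapper:wrapper -Dmaven=3.8.6
```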

+ 13 - 0
README.md

@@ -30,6 +30,9 @@ the cloud.
 * **Browse Messages** — browse messages with JSON, plain text, and Avro encoding
 * **Dynamic Topic Configuration** — create and configure new topics with dynamic configuration
 * **Configurable Authentification** — secure your installation with optional Github/Gitlab/Google OAuth 2.0
+* **Custom serialization/deserialization plugins** - use a ready-to-go serde for your data like AWS Glue or Smile, or code your own!
+* **Role based access control** - [manage permissions](https://github.com/provectus/kafka-ui/wiki/RBAC-(role-based-access-control)) to access the UI with granular precision
+* **Data masking** - [obfuscate](https://github.com/provectus/kafka-ui/blob/master/documentation/guides/DataMasking.md) sensitive data in topic messages
 
 
 # The Interface
 UI for Apache Kafka wraps major functions of Apache Kafka with an intuitive user interface.
@@ -76,6 +79,7 @@ We have plenty of [docker-compose files](documentation/compose/DOCKER_COMPOSE.md
 - [AWS IAM configuration](documentation/guides/AWS_IAM.md)
 - [Docker-compose files](documentation/compose/DOCKER_COMPOSE.md)
 - [Connection to a secure broker](documentation/guides/SECURE_BROKER.md)
+- [Configure serialization/deserialization plugins or code your own](documentation/guides/Serialization.md)
 
 
 ### Configuration File
 Example of how to configure clusters in the [application-local.yml](https://github.com/provectus/kafka-ui/blob/master/kafka-ui-api/src/main/resources/application-local.yml) configuration file:
@@ -179,6 +183,10 @@ For example, if you want to use an environment variable to set the `name` parame
 |`KAFKA_CLUSTERS_0_KSQLDBSERVER` 	| KSQL DB server address
 |`KAFKA_CLUSTERS_0_KSQLDBSERVERAUTH_USERNAME` 	| KSQL DB server's basic authentication username
 |`KAFKA_CLUSTERS_0_KSQLDBSERVERAUTH_PASSWORD` 	| KSQL DB server's basic authentication password
+|`KAFKA_CLUSTERS_0_KSQLDBSERVERSSL_KEYSTORELOCATION`   	|Path to the JKS keystore to communicate to KSQL DB
+|`KAFKA_CLUSTERS_0_KSQLDBSERVERSSL_KEYSTOREPASSWORD`   	|Password of the JKS keystore for KSQL DB
+|`KAFKA_CLUSTERS_0_KSQLDBSERVERSSL_TRUSTSTORELOCATION`   	|Path to the JKS truststore to communicate to KSQL DB
+|`KAFKA_CLUSTERS_0_KSQLDBSERVERSSL_TRUSTSTOREPASSWORD`   	|Password of the JKS truststore for KSQL DB
 |`KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL` 	|Security protocol to connect to the brokers. For SSL connection use "SSL", for plaintext connection don't set this environment variable
 |`KAFKA_CLUSTERS_0_SCHEMAREGISTRY`   	|SchemaRegistry's address
 |`KAFKA_CLUSTERS_0_SCHEMAREGISTRYAUTH_USERNAME`   	|SchemaRegistry's basic authentication username
@@ -196,8 +204,13 @@ For example, if you want to use an environment variable to set the `name` parame
 |`KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS` |Address of the Kafka Connect service endpoint
 |`KAFKA_CLUSTERS_0_KAFKACONNECT_0_USERNAME`| Kafka Connect cluster's basic authentication username
 |`KAFKA_CLUSTERS_0_KAFKACONNECT_0_PASSWORD`| Kafka Connect cluster's basic authentication password
+|`KAFKA_CLUSTERS_0_KAFKACONNECT_0_KEYSTORELOCATION`| Path to the JKS keystore to communicate to Kafka Connect
+|`KAFKA_CLUSTERS_0_KAFKACONNECT_0_KEYSTOREPASSWORD`| Password of the JKS keystore for Kafka Connect
+|`KAFKA_CLUSTERS_0_KAFKACONNECT_0_TRUSTSTORELOCATION`| Path to the JKS truststore to communicate to Kafka Connect
+|`KAFKA_CLUSTERS_0_KAFKACONNECT_0_TRUSTSTOREPASSWORD`| Password of the JKS truststore for Kafka Connect
 |`KAFKA_CLUSTERS_0_METRICS_SSL`          |Enable SSL for Metrics? `true` or `false`. For advanced setup, see `kafka-ui-jmx-secured.yml`
 |`KAFKA_CLUSTERS_0_METRICS_USERNAME` |Username for Metrics authentication
 |`KAFKA_CLUSTERS_0_METRICS_PASSWORD` |Password for Metrics authentication
+|`KAFKA_CLUSTERS_0_POLLING_THROTTLE_RATE` |Max traffic rate (bytes/sec) that kafka-ui is allowed to reach when polling messages from the cluster. Default: 0 (not limited)
 |`TOPIC_RECREATE_DELAY_SECONDS` |Time delay between topic deletion and topic creation attempts for topic recreate functionality. Default: 1
 |`TOPIC_RECREATE_MAXRETRIES`  |Number of attempts of topic creation after topic deletion for topic recreate functionality. Default: 15

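A minimal sketch of the newly documented variables in a docker-compose `environment` block; the paths, passwords, and rate value below are placeholders, not values from this repo:
```yaml
environment:
  KAFKA_CLUSTERS_0_KSQLDBSERVERSSL_TRUSTSTORELOCATION: /ssl/kafka.truststore.jks  # placeholder path
  KAFKA_CLUSTERS_0_KSQLDBSERVERSSL_TRUSTSTOREPASSWORD: secret                     # placeholder
  KAFKA_CLUSTERS_0_KAFKACONNECT_0_TRUSTSTORELOCATION: /ssl/kafka.truststore.jks   # placeholder path
  KAFKA_CLUSTERS_0_KAFKACONNECT_0_TRUSTSTOREPASSWORD: secret                      # placeholder
  KAFKA_CLUSTERS_0_POLLING_THROTTLE_RATE: 1048576  # ~1 MB/s polling cap; 0 (the default) means unlimited
```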
+ 2 - 1
SECURITY.md

@@ -6,7 +6,8 @@ Following versions of the project are currently being supported with security up
 
 
 | Version | Supported          |
 | ------- | ------------------ |
-| 0.4.x   | :white_check_mark: |
+| 0.5.x   | :white_check_mark: |
+| 0.4.x   | :x: |
 | 0.3.x   | :x:                |
 | 0.2.x   | :x:                |
 | 0.1.x   | :x:                |

+ 2 - 2
charts/kafka-ui/Chart.yaml

@@ -2,6 +2,6 @@ apiVersion: v2
 name: kafka-ui
 description: A Helm chart for kafka-UI
 type: application
-version: 0.4.4
-appVersion: v0.4.0
+version: 0.5.1
+appVersion: v0.5.0
 icon: https://github.com/provectus/kafka-ui/raw/master/documentation/images/kafka-ui-logo.png

+ 11 - 1
charts/kafka-ui/templates/deployment.yaml

@@ -4,6 +4,10 @@ metadata:
   name: {{ include "kafka-ui.fullname" . }}
   labels:
     {{- include "kafka-ui.labels" . | nindent 4 }}
+  {{- with .Values.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
 spec:
 {{- if not .Values.autoscaling.enabled }}
   replicas: {{ .Values.replicaCount }}
@@ -49,7 +53,7 @@ spec:
               {{- toYaml . | nindent 12 }}
             {{- end }}
             {{- if or .Values.yamlApplicationConfig .Values.yamlApplicationConfigConfigMap}}
-            - name: SPRING_CONFIG_LOCATION
+            - name: SPRING_CONFIG_ADDITIONAL-LOCATION
               {{- if .Values.yamlApplicationConfig }}
               value: /kafka-ui/config.yml
               {{- else if .Values.yamlApplicationConfigConfigMap }}
@@ -83,6 +87,9 @@ spec:
               {{- $contextPath := .Values.envs.config.SERVER_SERVLET_CONTEXT_PATH | default "" | printf "%s/actuator/health" | urlParse }}
               path: {{ get $contextPath "path" }}
               port: http
+              {{- if .Values.probes.useHttpsScheme }}
+              scheme: HTTPS
+              {{- end }}
             initialDelaySeconds: 60
             periodSeconds: 30
             timeoutSeconds: 10
@@ -91,6 +98,9 @@ spec:
               {{- $contextPath := .Values.envs.config.SERVER_SERVLET_CONTEXT_PATH | default "" | printf "%s/actuator/health" | urlParse }}
               path: {{ get $contextPath "path" }}
               port: http
+              {{- if .Values.probes.useHttpsScheme }}
+              scheme: HTTPS
+              {{- end }}
             initialDelaySeconds: 60
             periodSeconds: 30
             timeoutSeconds: 10

+ 10 - 1
charts/kafka-ui/values.yaml

@@ -69,6 +69,15 @@ networkPolicy:
 podAnnotations: {}
 podLabels: {}
 
+## Annotations to be added to kafka-ui Deployment
+##
+annotations: {}
+
+## Set probe scheme to HTTPS for readiness and liveness probes
+##
+probes:
+  useHttpsScheme: false
+
 podSecurityContext:
   {}
   # fsGroup: 2000
@@ -100,7 +109,7 @@ ingress:
   ingressClassName: ""
 
   # The path for the Ingress
-  path: ""
+  path: "/"
 
 
   # The hostname for the Ingress
   host: ""

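A quick usage sketch for the new chart options; the release name and annotation key/value are illustrative only:
```sh
# enable HTTPS readiness/liveness probes and add a Deployment annotation
helm upgrade --install kafka-ui charts/kafka-ui \
  --set probes.useHttpsScheme=true \
  --set annotations.owner=platform-team
```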
+ 54 - 12
documentation/compose/e2e-tests.yaml

@@ -7,10 +7,18 @@ services:
     image: provectuslabs/kafka-ui:latest
     ports:
       - 8080:8080
+    healthcheck:
+      test: wget --no-verbose --tries=1 --spider  http://localhost:8080/actuator/health
+      interval: 30s
+      timeout: 10s
+      retries: 10  
     depends_on:
-      - kafka0
-      - schemaregistry0
-      - kafka-connect0
+        kafka0:
+          condition: service_healthy
+        schemaregistry0:
+          condition: service_healthy
+        kafka-connect0:
+          condition: service_healthy
     environment:
       KAFKA_CLUSTERS_0_NAME: local
       KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
@@ -24,6 +32,11 @@ services:
     image: confluentinc/cp-kafka:7.2.1
     hostname: kafka0
     container_name: kafka0
+    healthcheck:
+     test: unset JMX_PORT && KAFKA_JMX_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9999" && kafka-broker-api-versions --bootstrap-server=localhost:9092
+     interval: 30s
+     timeout: 10s
+     retries: 10
     ports:
       - "9092:9092"
       - "9997:9997"
@@ -54,7 +67,13 @@ services:
     ports:
       - 8085:8085
     depends_on:
-      - kafka0
+      kafka0:
+          condition: service_healthy
+    healthcheck:
+     test: ["CMD", "timeout", "1", "curl", "--silent", "--fail", "http://schemaregistry0:8085/subjects"]
+     interval: 30s
+     timeout: 10s
+     retries: 10
     environment:
       SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
       SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
@@ -73,8 +92,15 @@ services:
     ports:
       - 8083:8083
     depends_on:
-      - kafka0
-      - schemaregistry0
+      kafka0:
+          condition: service_healthy
+      schemaregistry0:
+          condition: service_healthy
+    healthcheck:
+      test: ["CMD", "nc", "127.0.0.1", "8083"]
+      interval: 30s
+      timeout: 10s
+      retries: 10
     environment:
       CONNECT_BOOTSTRAP_SERVERS: kafka0:29092
       CONNECT_GROUP_ID: compose-connect-group
@@ -100,7 +126,8 @@ services:
     volumes:
       - ./message.json:/data/message.json
     depends_on:
-      - kafka0
+      kafka0:
+          condition: service_healthy
     command: "bash -c 'echo Waiting for Kafka to be ready... && \
     command: "bash -c 'echo Waiting for Kafka to be ready... && \
                cub kafka-ready -b kafka0:29092 1 30 && \
                cub kafka-ready -b kafka0:29092 1 30 && \
                kafka-topics --create --topic users --partitions 3 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
                kafka-topics --create --topic users --partitions 3 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
@@ -114,6 +141,11 @@ services:
         image: postgres:9.6.22
     ports:
       - 5432:5432
+    healthcheck:
+      test: ["CMD-SHELL", "pg_isready -U dev_user"]
+      interval: 10s
+      timeout: 5s
+      retries: 5  
     environment:
       POSTGRES_USER: 'dev_user'
       POSTGRES_PASSWORD: '12345'
@@ -121,18 +153,28 @@ services:
   create-connectors:
     image: ellerbrock/alpine-bash-curl-ssl
     depends_on:
-      - postgres-db
-      - kafka-connect0
+      postgres-db:
+          condition: service_healthy
+      kafka-connect0:
+        condition: service_healthy
     volumes:
       - ./connectors:/connectors
     command: bash -c '/connectors/start.sh'
 
   ksqldb:
     image: confluentinc/ksqldb-server:0.18.0
+    healthcheck:
+      test: ["CMD", "timeout", "1", "curl", "--silent", "--fail", "http://localhost:8088/info"]
+      interval: 30s
+      timeout: 10s
+      retries: 10
     depends_on:
-      - kafka0
-      - kafka-connect0
-      - schemaregistry0
+      kafka0:
+        condition: service_healthy
+      kafka-connect0:
+        condition: service_healthy
+      schemaregistry0:
+         condition: service_healthy
     ports:
       - 8088:8088
     environment:

+ 180 - 0
documentation/compose/kafka-ssl-components.yaml

@@ -0,0 +1,180 @@
+---
+version: '3.4'
+services:
+  kafka-ui:
+    container_name: kafka-ui
+    image: provectuslabs/kafka-ui:latest
+    ports:
+      - 8080:8080
+    depends_on:
+      - kafka0
+      - schemaregistry0
+      - kafka-connect0
+      - ksqldb0
+    environment:
+      KAFKA_CLUSTERS_0_NAME: local
+      KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL: SSL
+      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092 # SSL LISTENER!
+      KAFKA_CLUSTERS_0_PROPERTIES_SSL_TRUSTSTORE_LOCATION: /kafka.truststore.jks
+      KAFKA_CLUSTERS_0_PROPERTIES_SSL_TRUSTSTORE_PASSWORD: secret
+      KAFKA_CLUSTERS_0_PROPERTIES_SSL_KEYSTORE_LOCATION: /kafka.keystore.jks
+      KAFKA_CLUSTERS_0_PROPERTIES_SSL_KEYSTORE_PASSWORD: secret
+      KAFKA_CLUSTERS_0_PROPERTIES_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM: '' # DISABLE COMMON NAME VERIFICATION
+      KAFKA_CLUSTERS_0_SCHEMAREGISTRY: https://schemaregistry0:8085
+      KAFKA_CLUSTERS_0_SCHEMAREGISTRYSSL_KEYSTORELOCATION: /kafka.keystore.jks
+      KAFKA_CLUSTERS_0_SCHEMAREGISTRYSSL_KEYSTOREPASSWORD: "secret"
+      KAFKA_CLUSTERS_0_SCHEMAREGISTRYSSL_TRUSTSTORELOCATION: /kafka.truststore.jks
+      KAFKA_CLUSTERS_0_SCHEMAREGISTRYSSL_TRUSTSTOREPASSWORD: "secret"
+      KAFKA_CLUSTERS_0_KSQLDBSERVER: https://ksqldb0:8088
+      KAFKA_CLUSTERS_0_KSQLDBSERVERSSL_KEYSTORELOCATION: /kafka.keystore.jks
+      KAFKA_CLUSTERS_0_KSQLDBSERVERSSL_KEYSTOREPASSWORD: "secret"
+      KAFKA_CLUSTERS_0_KSQLDBSERVERSSL_TRUSTSTORELOCATION: /kafka.truststore.jks
+      KAFKA_CLUSTERS_0_KSQLDBSERVERSSL_TRUSTSTOREPASSWORD: "secret"
+      KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: local
+      KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: https://kafka-connect0:8083
+      KAFKA_CLUSTERS_0_KAFKACONNECT_0_KEYSTORELOCATION: /kafka.keystore.jks
+      KAFKA_CLUSTERS_0_KAFKACONNECT_0_KEYSTOREPASSWORD: "secret"
+      KAFKA_CLUSTERS_0_KAFKACONNECT_0_TRUSTSTORELOCATION: /kafka.truststore.jks
+      KAFKA_CLUSTERS_0_KAFKACONNECT_0_TRUSTSTOREPASSWORD: "secret"
+    volumes:
+      - ./ssl/kafka.truststore.jks:/kafka.truststore.jks
+      - ./ssl/kafka.keystore.jks:/kafka.keystore.jks
+
+  kafka0:
+    image: confluentinc/cp-kafka:7.2.1
+    hostname: kafka0
+    container_name: kafka0
+    ports:
+      - "9092:9092"
+      - "9997:9997"
+    environment:
+      KAFKA_BROKER_ID: 1
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,SSL:SSL,PLAINTEXT_HOST:PLAINTEXT'
+      KAFKA_ADVERTISED_LISTENERS: 'SSL://kafka0:29092,PLAINTEXT_HOST://localhost:9092'
+      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
+      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
+      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
+      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
+      KAFKA_JMX_PORT: 9997
+      KAFKA_JMX_HOSTNAME: localhost
+      KAFKA_PROCESS_ROLES: 'broker,controller'
+      KAFKA_NODE_ID: 1
+      KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka0:29093'
+      KAFKA_LISTENERS: 'SSL://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092'
+      KAFKA_INTER_BROKER_LISTENER_NAME: 'SSL'
+      KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
+      KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
+      KAFKA_SECURITY_PROTOCOL: SSL
+      KAFKA_SSL_ENABLED_MECHANISMS: PLAIN,SSL
+      KAFKA_SSL_KEYSTORE_FILENAME: kafka.keystore.jks
+      KAFKA_SSL_KEYSTORE_CREDENTIALS: creds
+      KAFKA_SSL_KEY_CREDENTIALS: creds
+      KAFKA_SSL_TRUSTSTORE_FILENAME: kafka.truststore.jks
+      KAFKA_SSL_TRUSTSTORE_CREDENTIALS: creds
+      #KAFKA_SSL_CLIENT_AUTH: 'required'
+      KAFKA_SSL_CLIENT_AUTH: 'requested'
+      KAFKA_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM: '' # COMMON NAME VERIFICATION IS DISABLED SERVER-SIDE
+    volumes:
+      - ./scripts/update_run.sh:/tmp/update_run.sh
+      - ./ssl/creds:/etc/kafka/secrets/creds
+      - ./ssl/kafka.truststore.jks:/etc/kafka/secrets/kafka.truststore.jks
+      - ./ssl/kafka.keystore.jks:/etc/kafka/secrets/kafka.keystore.jks
+    command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"
+
+  schemaregistry0:
+    image: confluentinc/cp-schema-registry:7.2.1
+    depends_on:
+      - kafka0
+    environment:
+      SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: SSL://kafka0:29092
+      SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: SSL
+      SCHEMA_REGISTRY_KAFKASTORE_SSL_TRUSTSTORE_LOCATION: /kafka.truststore.jks
+      SCHEMA_REGISTRY_KAFKASTORE_SSL_TRUSTSTORE_PASSWORD: secret
+      SCHEMA_REGISTRY_KAFKASTORE_SSL_KEYSTORE_LOCATION: /kafka.keystore.jks
+      SCHEMA_REGISTRY_KAFKASTORE_SSL_KEYSTORE_PASSWORD: secret
+      SCHEMA_REGISTRY_KAFKASTORE_SSL_KEY_PASSWORD: secret
+      SCHEMA_REGISTRY_HOST_NAME: schemaregistry0
+      SCHEMA_REGISTRY_LISTENERS: https://schemaregistry0:8085
+      SCHEMA_REGISTRY_INTER_INSTANCE_PROTOCOL: https
+
+      SCHEMA_REGISTRY_SCHEMA_REGISTRY_INTER_INSTANCE_PROTOCOL: "https"
+      SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: INFO
+      SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
+      SCHEMA_REGISTRY_SSL_CLIENT_AUTHENTICATION: "REQUIRED"
+      SCHEMA_REGISTRY_SSL_TRUSTSTORE_LOCATION: /kafka.truststore.jks
+      SCHEMA_REGISTRY_SSL_TRUSTSTORE_PASSWORD: secret
+      SCHEMA_REGISTRY_SSL_KEYSTORE_LOCATION: /kafka.keystore.jks
+      SCHEMA_REGISTRY_SSL_KEYSTORE_PASSWORD: secret
+      SCHEMA_REGISTRY_SSL_KEY_PASSWORD: secret
+    ports:
+      - 8085:8085
+    volumes:
+      - ./ssl/kafka.truststore.jks:/kafka.truststore.jks
+      - ./ssl/kafka.keystore.jks:/kafka.keystore.jks
+
+  kafka-connect0:
+    image: confluentinc/cp-kafka-connect:7.2.1
+    ports:
+      - 8083:8083
+    depends_on:
+      - kafka0
+      - schemaregistry0
+    environment:
+      CONNECT_BOOTSTRAP_SERVERS: kafka0:29092
+      CONNECT_GROUP_ID: compose-connect-group
+      CONNECT_CONFIG_STORAGE_TOPIC: _connect_configs
+      CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: 1
+      CONNECT_OFFSET_STORAGE_TOPIC: _connect_offset
+      CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: 1
+      CONNECT_STATUS_STORAGE_TOPIC: _connect_status
+      CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: 1
+      CONNECT_KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter
+      CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: https://schemaregistry0:8085
+      CONNECT_VALUE_CONVERTER: org.apache.kafka.connect.storage.StringConverter
+      CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: https://schemaregistry0:8085
+      CONNECT_INTERNAL_KEY_CONVERTER: org.apache.kafka.connect.json.JsonConverter
+      CONNECT_INTERNAL_VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter
+      CONNECT_REST_ADVERTISED_HOST_NAME: kafka-connect0
+      CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components"
+      CONNECT_SECURITY_PROTOCOL: "SSL"
+      CONNECT_SSL_KEYSTORE_LOCATION: "/kafka.keystore.jks"
+      CONNECT_SSL_KEY_PASSWORD: "secret"
+      CONNECT_SSL_KEYSTORE_PASSWORD: "secret"
+      CONNECT_SSL_TRUSTSTORE_LOCATION: "/kafka.truststore.jks"
+      CONNECT_SSL_TRUSTSTORE_PASSWORD: "secret"
+      CONNECT_SSL_CLIENT_AUTH: "requested"
+      CONNECT_REST_ADVERTISED_LISTENER: "https"
+      CONNECT_LISTENERS: "https://kafka-connect0:8083"
+    volumes:
+      - ./ssl/kafka.truststore.jks:/kafka.truststore.jks
+      - ./ssl/kafka.keystore.jks:/kafka.keystore.jks
+
+  ksqldb0:
+    image: confluentinc/ksqldb-server:0.18.0
+    depends_on:
+      - kafka0
+      - kafka-connect0
+      - schemaregistry0
+    ports:
+      - 8088:8088
+    environment:
+      KSQL_CUB_KAFKA_TIMEOUT: 120
+      KSQL_LISTENERS: https://0.0.0.0:8088
+      KSQL_BOOTSTRAP_SERVERS: SSL://kafka0:29092
+      KSQL_SECURITY_PROTOCOL: SSL
+      KSQL_SSL_TRUSTSTORE_LOCATION: /kafka.truststore.jks
+      KSQL_SSL_TRUSTSTORE_PASSWORD: secret
+      KSQL_SSL_KEYSTORE_LOCATION: /kafka.keystore.jks
+      KSQL_SSL_KEYSTORE_PASSWORD: secret
+      KSQL_SSL_KEY_PASSWORD: secret
+      KSQL_SSL_CLIENT_AUTHENTICATION: REQUIRED
+      KSQL_KSQL_LOGGING_PROCESSING_STREAM_AUTO_CREATE: "true"
+      KSQL_KSQL_LOGGING_PROCESSING_TOPIC_AUTO_CREATE: "true"
+      KSQL_KSQL_CONNECT_URL: https://kafka-connect0:8083
+      KSQL_KSQL_SCHEMA_REGISTRY_URL: https://schemaregistry0:8085
+      KSQL_KSQL_SERVICE_ID: my_ksql_1
+      KSQL_KSQL_HIDDEN_TOPICS: '^_.*'
+      KSQL_CACHE_MAX_BYTES_BUFFERING: 0
+    volumes:
+      - ./ssl/kafka.truststore.jks:/kafka.truststore.jks
+      - ./ssl/kafka.keystore.jks:/kafka.keystore.jks

+ 111 - 0
documentation/compose/kafka-ui-serdes.yaml

@@ -0,0 +1,111 @@
+---
+version: '2'
+services:
+
+    kafka-ui:
+        container_name: kafka-ui
+        image: provectuslabs/kafka-ui:latest
+        ports:
+            - 8080:8080
+        depends_on:
+            - kafka0
+            - schemaregistry0
+        environment:
+            kafka.clusters.0.name: SerdeExampleCluster
+            kafka.clusters.0.bootstrapServers: kafka0:29092
+            kafka.clusters.0.schemaRegistry: http://schemaregistry0:8085
+            # optional auth and ssl properties for SR
+            #kafka.clusters.0.schemaRegistryAuth.username: "use"
+            #kafka.clusters.0.schemaRegistryAuth.password: "pswrd"
+            #kafka.clusters.0.schemaRegistrySSL.keystoreLocation: /kafka.keystore.jks
+            #kafka.clusters.0.schemaRegistrySSL.keystorePassword: "secret"
+            #kafka.clusters.0.schemaRegistrySSL.truststoreLocation: /kafka.truststore.jks
+            #kafka.clusters.0.schemaRegistrySSL.truststorePassword: "secret"
+
+            kafka.clusters.0.defaultKeySerde: Int32  #optional
+            kafka.clusters.0.defaultValueSerde: String #optional
+
+            kafka.clusters.0.serde.0.name: ProtobufFile
+            kafka.clusters.0.serde.0.topicKeysPattern: "topic1"
+            kafka.clusters.0.serde.0.topicValuesPattern: "topic1"
+            kafka.clusters.0.serde.0.properties.protobufFiles.0: /protofiles/key-types.proto
+            kafka.clusters.0.serde.0.properties.protobufFiles.1: /protofiles/values.proto
+            kafka.clusters.0.serde.0.properties.protobufMessageNameForKey: test.MyKey # default type for keys
+            kafka.clusters.0.serde.0.properties.protobufMessageName: test.MyValue # default type for values
+            kafka.clusters.0.serde.0.properties.protobufMessageNameForKeyByTopic.topic1: test.MySpecificTopicKey # keys type for topic "topic1"
+            kafka.clusters.0.serde.0.properties.protobufMessageNameByTopic.topic1: test.MySpecificTopicValue # values type for topic "topic1"
+
+            kafka.clusters.0.serde.1.name: String
+            #kafka.clusters.0.serde.1.properties.encoding: "UTF-16" #optional, default is UTF-8
+            kafka.clusters.0.serde.1.topicValuesPattern: "json-events|text-events"
+
+            kafka.clusters.0.serde.2.name: AsciiString
+            kafka.clusters.0.serde.2.className: com.provectus.kafka.ui.serdes.builtin.StringSerde
+            kafka.clusters.0.serde.2.properties.encoding: "ASCII"
+
+            kafka.clusters.0.serde.3.name: SchemaRegistry # will be configured automatically using cluster SR
+            kafka.clusters.0.serde.3.topicValuesPattern: "sr-topic.*"
+
+            kafka.clusters.0.serde.4.name: AnotherSchemaRegistry
+            kafka.clusters.0.serde.4.className: com.provectus.kafka.ui.serdes.builtin.sr.SchemaRegistrySerde
+            kafka.clusters.0.serde.4.properties.url: http://schemaregistry0:8085
+            kafka.clusters.0.serde.4.properties.keySchemaNameTemplate: "%s-key"
+            kafka.clusters.0.serde.4.properties.schemaNameTemplate: "%s-value"
+            #kafka.clusters.0.serde.4.topicValuesPattern: "sr2-topic.*"
+            # optional auth and ssl properties for SR:
+            #kafka.clusters.0.serde.4.properties.username: "user"
+            #kafka.clusters.0.serde.4.properties.password: "passw"
+            #kafka.clusters.0.serde.4.properties.keystoreLocation:  /kafka.keystore.jks
+            #kafka.clusters.0.serde.4.properties.keystorePassword: "secret"
+            #kafka.clusters.0.serde.4.properties.truststoreLocation: /kafka.truststore.jks
+            #kafka.clusters.0.serde.4.properties.truststorePassword: "secret"
+
+            kafka.clusters.0.serde.5.name: UInt64
+            kafka.clusters.0.serde.5.topicKeysPattern: "topic-with-uint64keys"
+        volumes:
+            - ./proto:/protofiles
+
+    kafka0:
+        image: confluentinc/cp-kafka:7.2.1
+        hostname: kafka0
+        container_name: kafka0
+        ports:
+            - "9092:9092"
+            - "9997:9997"
+        environment:
+            KAFKA_BROKER_ID: 1
+            KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT'
+            KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092'
+            KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
+            KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
+            KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
+            KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
+            KAFKA_JMX_PORT: 9997
+            KAFKA_JMX_HOSTNAME: localhost
+            KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9997
+            KAFKA_PROCESS_ROLES: 'broker,controller'
+            KAFKA_NODE_ID: 1
+            KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka0:29093'
+            KAFKA_LISTENERS: 'PLAINTEXT://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092'
+            KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT'
+            KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
+            KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
+        volumes:
+            - ./scripts/update_run.sh:/tmp/update_run.sh
+        command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"
+
+    schemaregistry0:
+        image: confluentinc/cp-schema-registry:7.2.1
+        ports:
+            - 8085:8085
+        depends_on:
+            - kafka0
+        environment:
+            SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
+            SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
+            SCHEMA_REGISTRY_HOST_NAME: schemaregistry0
+            SCHEMA_REGISTRY_LISTENERS: http://schemaregistry0:8085
+
+            SCHEMA_REGISTRY_SCHEMA_REGISTRY_INTER_INSTANCE_PROTOCOL: "http"
+            SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: INFO
+            SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas

+ 1 - 3
documentation/compose/kafka-ui.yaml

@@ -22,7 +22,7 @@ services:
       KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
       KAFKA_CLUSTERS_1_NAME: secondLocal
       KAFKA_CLUSTERS_1_BOOTSTRAPSERVERS: kafka1:29092
-      KAFKA_CLUSTERS_0_METRICS_PORT: 9998
+      KAFKA_CLUSTERS_1_METRICS_PORT: 9998
       KAFKA_CLUSTERS_1_SCHEMAREGISTRY: http://schemaregistry1:8085
 
   kafka0:
@@ -41,7 +41,6 @@ services:
       KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
       KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
       KAFKA_JMX_PORT: 9997
-      KAFKA_JMX_HOSTNAME: localhost
       KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9997
       KAFKA_PROCESS_ROLES: 'broker,controller'
       KAFKA_NODE_ID: 1
@@ -70,7 +69,6 @@ services:
       KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
       KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
       KAFKA_JMX_PORT: 9998
-      KAFKA_JMX_HOSTNAME: localhost
       KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9998
       KAFKA_PROCESS_ROLES: 'broker,controller'
       KAFKA_NODE_ID: 1

+ 11 - 0
documentation/compose/proto/key-types.proto

@@ -0,0 +1,11 @@
+syntax = "proto3";
+package test;
+
+message MyKey {
+    string myKeyF1 = 1;
+}
+
+message MySpecificTopicKey {
+    string special_field1 = 1;
+    string special_field2 = 2;
+}

+ 12 - 0
documentation/compose/proto/values.proto

@@ -0,0 +1,12 @@
+syntax = "proto3";
+package test;
+
+message MySpecificTopicValue {
+    string f1 = 1;
+    string f2 = 2;
+}
+
+message MyValue {
+  int32 version = 1;
+  string payload = 2;
+}

+ 2 - 1
documentation/compose/ssl/generate_certs.sh

@@ -144,7 +144,8 @@ echo "Now the trust store's private key (CA) will sign the keystore's certificat
 echo
 openssl x509 -req -CA $CA_CERT_FILE -CAkey $trust_store_private_key_file \
   -in $KEYSTORE_SIGN_REQUEST -out $KEYSTORE_SIGNED_CERT \
-  -days $VALIDITY_IN_DAYS -CAcreateserial
+  -days $VALIDITY_IN_DAYS -CAcreateserial \
+  -extensions kafka -extfile san.cnf
 # creates $KEYSTORE_SIGN_REQUEST_SRL which is never used or needed.
 
 echo

BIN
documentation/compose/ssl/kafka.keystore.jks


BIN
documentation/compose/ssl/kafka.truststore.jks


+ 2 - 0
documentation/compose/ssl/san.cnf

@@ -0,0 +1,2 @@
+[kafka]
+subjectAltName = DNS:kafka0,DNS:schemaregistry0,DNS:kafka-connect0,DNS:ksqldb0

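To check that the regenerated keystore certificate actually carries the SANs from `san.cnf`, something like the following should work; the keystore name and password mirror the compose files above and are assumptions here:
```sh
keytool -list -v -keystore kafka.keystore.jks -storepass secret \
  | grep -A 5 "SubjectAlternativeName"
```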
+ 123 - 0
documentation/guides/DataMasking.md

@@ -0,0 +1,123 @@
+# Topics data masking
+
+You can configure kafka-ui to mask sensitive data shown on the Messages page.
+
+Several masking policies are supported:
+
+### REMOVE
+For JSON objects, target fields are removed; for non-JSON values, the string "null" is returned.
+```yaml
+- type: REMOVE
+  fields: [ "id", "name" ]
+  ...
+```
+
+Apply examples:
+```
+{ "id": 1234, "name": { "first": "James" }, "age": 30 } 
+ ->
+{ "age": 30 } 
+```
+```
+non-json string -> null
+```
+
+### REPLACE
+For JSON objects, target fields' values are replaced with the specified replacement string (`***DATA_MASKED***` by default). Note: if a target field's value is an object, the replacement is applied to all of its fields recursively (see example).
+
+```yaml
+- type: REPLACE
+  fields: [ "id", "name" ]
+  replacement: "***"  #optional, "***DATA_MASKED***" by default
+  ...
+```
+
+Apply examples:
+```
+{ "id": 1234, "name": { "first": "James", "last": "Bond" }, "age": 30 } 
+ ->
+{ "id": "***", "name": { "first": "***", "last": "***" }, "age": 30 } 
+```
+```
+non-json string -> ***
+```
+
+### MASK
+Masks target fields' values with the specified masking characters, recursively (spaces and line separators are kept as-is).
+The `pattern` array specifies which symbols are used to replace upper-case chars, lower-case chars, digits, and other symbols, respectively.
+
+```yaml
+- type: MASK
+  fields: [ "id", "name" ]
+  pattern: ["A", "a", "N", "_"]   # optional, default is ["X", "x", "n", "-"]
+  ...
+```
+
+Apply examples:
+```
+{ "id": 1234, "name": { "first": "James", "last": "Bond!" }, "age": 30 } 
+ ->
+{ "id": "NNNN", "name": { "first": "Aaaaa", "last": "Aaaa_" }, "age": 30 } 
+```
+```
+Some string! -> Aaaa aaaaaa_
+```
+
+----
+
+For each policy, if `fields` is not specified, the policy is applied to all of the object's fields, or to the whole string if the value is not a JSON object.
+
+You can specify which masks are applied to a topic's keys/values. Multiple policies are applied if a topic matches more than one policy's pattern.
+
+Yaml configuration example:
+```yaml
+kafka:
+  clusters:
+    - name: ClusterName
+      # Other Cluster configuration omitted ... 
+      masking:
+        - type: REMOVE
+          fields: [ "id" ]
+          topicKeysPattern: "events-with-ids-.*"
+          topicValuesPattern: "events-with-ids-.*"
+          
+        - type: REPLACE
+          fields: [ "companyName", "organizationName" ]
+          replacement: "***MASKED_ORG_NAME***"   #optional
+          topicValuesPattern: "org-events-.*"
+        
+        - type: MASK
+          fields: [ "name", "surname" ]
+          pattern: ["A", "a", "N", "_"]  #optional
+          topicValuesPattern: "user-states"
+
+        - type: MASK
+          topicValuesPattern: "very-secured-topic"
+```
+
+Same configuration in env-vars fashion:
+```
+...
+KAFKA_CLUSTERS_0_MASKING_0_TYPE: REMOVE
+KAFKA_CLUSTERS_0_MASKING_0_FIELDS_0: "id"
+KAFKA_CLUSTERS_0_MASKING_0_TOPICKEYSPATTERN: "events-with-ids-.*"
+KAFKA_CLUSTERS_0_MASKING_0_TOPICVALUESPATTERN: "events-with-ids-.*"
+
+KAFKA_CLUSTERS_0_MASKING_1_TYPE: REPLACE
+KAFKA_CLUSTERS_0_MASKING_1_FIELDS_0: "companyName"
+KAFKA_CLUSTERS_0_MASKING_1_FIELDS_1: "organizationName"
+KAFKA_CLUSTERS_0_MASKING_1_REPLACEMENT: "***MASKED_ORG_NAME***"
+KAFKA_CLUSTERS_0_MASKING_1_TOPICVALUESPATTERN: "org-events-.*"
+
+KAFKA_CLUSTERS_0_MASKING_2_TYPE: MASK
+KAFKA_CLUSTERS_0_MASKING_2_FIELDS_0: "name"
+KAFKA_CLUSTERS_0_MASKING_2_FIELDS_1: "surname"
+KAFKA_CLUSTERS_0_MASKING_2_PATTERN_0: 'A'
+KAFKA_CLUSTERS_0_MASKING_2_PATTERN_1: 'a'
+KAFKA_CLUSTERS_0_MASKING_2_PATTERN_2: 'N'
+KAFKA_CLUSTERS_0_MASKING_2_PATTERN_3: '_'
+KAFKA_CLUSTERS_0_MASKING_2_TOPICVALUESPATTERN: "user-states"
+
+KAFKA_CLUSTERS_0_MASKING_3_TYPE: MASK
+KAFKA_CLUSTERS_0_MASKING_3_TOPICVALUESPATTERN: "very-secured-topic"
+```

+ 21 - 3
documentation/guides/Protobuf.md

@@ -1,5 +1,7 @@
 # Kafkaui Protobuf Support
 
+### This document is deprecated, please see examples in [Serialization document](Serialization.md).
+
 Kafkaui supports deserializing protobuf messages in two ways:
 1. Using Confluent Schema Registry's [protobuf support](https://docs.confluent.io/platform/current/schema-registry/serdes-develop/serdes-protobuf.html).
 2. Supplying a protobuf file as well as a configuration that maps topic names to protobuf types.
@@ -11,11 +13,15 @@ To configure Kafkaui to deserialize protobuf messages using a supplied protobuf
 kafka:
   clusters:
     - # Cluster configuration omitted.
-      # protobufFile is the path to the protobuf schema.
+      # protobufFile is the path to the protobuf schema. (deprecated: please use "protobufFiles")
       protobufFile: path/to/my.proto
+      # protobufFiles is the path to one or more protobuf schemas.
+      protobufFiles: 
+        - /path/to/my.proto
+        - /path/to/another.proto
       # protobufMessageName is the default protobuf type that is used to deserilize
       # the message's value if the topic is not found in protobufMessageNameByTopic.
-      protobufMessageName: my.Type1
+      protobufMessageName: my.DefaultValType
       # protobufMessageNameByTopic is a mapping of topic names to protobuf types.
       # This mapping is required and is used to deserialize the Kafka message's value.
       protobufMessageNameByTopic:
@@ -23,11 +29,23 @@ kafka:
         topic2: my.Type2
       # protobufMessageNameForKey is the default protobuf type that is used to deserilize
       # the message's key if the topic is not found in protobufMessageNameForKeyByTopic.
-      protobufMessageNameForKey: my.Type1
+      protobufMessageNameForKey: my.DefaultKeyType
       # protobufMessageNameForKeyByTopic is a mapping of topic names to protobuf types.
       # This mapping is optional and is used to deserialize the Kafka message's key.
       # If a protobuf type is not found for a topic's key, the key is deserialized as a string,
       # unless protobufMessageNameForKey is specified.
       protobufMessageNameForKeyByTopic:
         topic1: my.KeyType1
+```
+
+Same config with flattened config (for docker-compose):
+
+```text
+kafka.clusters.0.protobufFiles.0: /path/to/my.proto
+kafka.clusters.0.protobufFiles.1: /path/to/another.proto
+kafka.clusters.0.protobufMessageName: my.DefaultValType
+kafka.clusters.0.protobufMessageNameByTopic.topic1: my.Type1
+kafka.clusters.0.protobufMessageNameByTopic.topic2: my.Type2
+kafka.clusters.0.protobufMessageNameForKey: my.DefaultKeyType
+kafka.clusters.0.protobufMessageNameForKeyByTopic.topic1: my.KeyType1
 ```

+ 169 - 0
documentation/guides/Serialization.md

@@ -0,0 +1,169 @@
+## Serialization and deserialization and custom plugins
+
+Kafka-ui supports multiple ways to serialize/deserialize data.
+
+
+### Int32, Int64, UInt32, UInt64
+Big-endian 4/8 bytes representation of signed/unsigned integers.
+
+### Base64
+Base64 (RFC4648) binary data representation. Useful when the actual content does not matter, but an exact (byte-wise) copy of the key/value needs to be sent.
+
+### String 
+Treats binary data as a string in specified encoding. Default encoding is UTF-8.
+
+Class name: `com.provectus.kafka.ui.serdes.builtin.StringSerde`
+
+Sample configuration (if you want to overwrite default configuration):
+```yaml
+kafka:
+  clusters:
+    - name: Cluster1
+      # Other Cluster configuration omitted ... 
+      serdes:
+          # registering String serde with custom config
+        - name: AsciiString
+          className: com.provectus.kafka.ui.serdes.builtin.StringSerde
+          properties:
+            encoding: "ASCII"
+        
+          # overriding built-in String serde config
+        - name: String 
+          properties:
+            encoding: "UTF-16"
+```
+
+### Protobuf
+
+Class name: `com.provectus.kafka.ui.serdes.builtin.ProtobufFileSerde`
+
+Sample configuration:
+```yaml
+kafka:
+  clusters:
+    - name: Cluster1
+      # Other Cluster configuration omitted ... 
+      serdes:
+        - name: ProtobufFile
+          properties:
+            # path to the protobuf schema files
+            protobufFiles:
+              - path/to/my.proto
+              - path/to/another.proto
+            # default protobuf type that is used for KEY serialization/deserialization
+            # optional
+            protobufMessageNameForKey: my.Type1
+            # mapping of topic names to protobuf types, that will be used for KEYS  serialization/deserialization
+            # optional
+            protobufMessageNameForKeyByTopic:
+              topic1: my.KeyType1
+              topic2: my.KeyType2
+            # default protobuf type that is used for VALUE serialization/deserialization
+            # optional, if not set - first type in file will be used as default
+            protobufMessageName: my.Type1
+            # mapping of topic names to protobuf types, that will be used for VALUES  serialization/deserialization
+            # optional
+            protobufMessageNameByTopic:
+              topic1: my.Type1
+              "topic.2": my.Type2
+```
+Docker-compose sample for Protobuf serialization is [here](../compose/kafka-ui-serdes.yaml).
+
+Legacy configuration for protobuf is [here](Protobuf.md).
+
+### SchemaRegistry
+SchemaRegistry serde is automatically configured if schema registry properties are set on the cluster level.
+You can also add new SchemaRegistry-typed serdes that connect to another schema registry instance.
+
+Class name: `com.provectus.kafka.ui.serdes.builtin.sr.SchemaRegistrySerde`
+
+Sample configuration:
+```yaml
+kafka:
+  clusters:
+    - name: Cluster1
+      # this url will be used by "SchemaRegistry" by default
+      schemaRegistry: http://main-schema-registry:8081
+      serdes:
+        - name: AnotherSchemaRegistry
+          className: com.provectus.kafka.ui.serdes.builtin.sr.SchemaRegistrySerde
+          properties:
+            url:  http://another-schema-registry:8081
+            # auth properties, optional
+            username: nameForAuth
+            password: P@ssW0RdForAuth
+        
+          # and also add another SchemaRegistry serde
+        - name: ThirdSchemaRegistry
+          className: com.provectus.kafka.ui.serdes.builtin.sr.SchemaRegistrySerde
+          properties:
+            url:  http://another-yet-schema-registry:8081
+```
+
+## Setting serdes for specific topics
+You can specify a preferred serde for a topic's keys/values. This serde will be chosen by default in the UI on the topic's view/produce pages.
+To do so, set the `topicKeysPattern`/`topicValuesPattern` properties for the selected serde. Kafka-ui will choose the first serde whose pattern matches.
+
+Sample configuration:
+```yaml
+kafka:
+  clusters:
+    - name: Cluster1
+      serdes:
+        - name: String
+          topicKeysPattern: click-events|imp-events
+        
+        - name: Int64
+          topicKeysPattern: ".*-events"
+        
+        - name: SchemaRegistry
+          topicValuesPattern: click-events|imp-events
+```
+
+
+## Default serdes
+You can specify which serde will be chosen in the UI by default if no other serde is selected via the `topicKeysPattern`/`topicValuesPattern` settings.
+
+Sample configuration:
+```yaml
+kafka:
+  clusters:
+    - name: Cluster1
+      defaultKeySerde: Int32
+      defaultValueSerde: String
+      serdes:
+        - name: Int32
+          topicKeysPattern: click-events|imp-events
+```
+
+## Fallback
+If the selected serde cannot be applied (an exception is thrown), the fallback serde (String with UTF-8 encoding) is applied. Such messages are specially highlighted in the UI.
+
+## Custom pluggable serde registration
+You can implement your own serde and register it in kafka-ui application.
+To do so:
+1. Add `kafka-ui-serde-api` dependency (should be downloadable via maven central)
+2. Implement `com.provectus.kafka.ui.serde.api.Serde` interface. See javadoc for implementation requirements.
+3. Pack your serde into an uber jar, or provide a directory with a no-dependency jar and its dependency jars
+
+
+Example pluggable serdes:
+https://github.com/provectus/kafkaui-smile-serde
+https://github.com/provectus/kafkaui-glue-sr-serde
+
+Sample configuration:
+```yaml
+kafka:
+  clusters:
+    - name: Cluster1
+      serdes:
+        - name: MyCustomSerde
+          className: my.lovely.org.KafkaUiSerde
+          filePath: /var/lib/kui-serde/my-kui-serde.jar
+          
+        - name: MyCustomSerde2
+          className: my.lovely.org.KafkaUiSerde2
+          filePath: /var/lib/kui-serde2
+          properties:
+            prop1: v1
+```

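For step 1 of the custom serde registration above (adding the `kafka-ui-serde-api` dependency), the Maven coordinates would look roughly like this; the version is a placeholder, so check Maven Central for the actual release:
```xml
<dependency>
    <groupId>com.provectus</groupId>
    <artifactId>kafka-ui-serde-api</artifactId>
    <!-- placeholder version; use the latest published release -->
    <version>1.0.0</version>
</dependency>
```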
+ 7 - 6
documentation/project/contributing/prerequisites.md

@@ -6,7 +6,7 @@ machine for local development.
 Before you begin contributing you must have:
 
 * A GitHub account
-* `Java` 13 or newer
+* `Java` 17 or newer
 * `Git`
 * `Docker`
 
@@ -17,18 +17,19 @@ Before you begin contributing you must have:
 ```sh
 brew cask
 ```
-3. Install JDK 13 via Homebrew cask:
+3. Install Eclipse Temurin 17 via Homebrew cask:
 ```sh
-brew tap adoptopenjdk/openjdk
-brew install adoptopenjdk13
+brew tap homebrew/cask-versions
+brew install temurin17
 ```
 4. Verify Installation
 ```sh
 java -version
 ```
-Note : In case JAVA13 is not set as your default Java then you can consider to include JAVA13 in your PATH after installation
+Note: If OpenJDK 17 is not set as your default Java, consider including it in your `$PATH` after installation
 ```sh
-export PATH="/Library/Java/JavaVirtualMachines/adoptopenjdk-13.jdk/Contents/Home/bin:$PATH
+export PATH="$(/usr/libexec/java_home -v 17)/bin:$PATH"
+export JAVA_HOME="$(/usr/libexec/java_home -v 17)"
 ```
 
 ## Tips

+ 3 - 3
kafka-ui-api/Dockerfile

@@ -1,7 +1,7 @@
-FROM alpine:3.16.2
+FROM azul/zulu-openjdk-alpine:17
 
 
-RUN apk add --no-cache openjdk13-jre libc6-compat gcompat \
-&& addgroup -S kafkaui && adduser -S kafkaui -G kafkaui
+RUN apk add --no-cache gcompat # need to make snappy codec work
+RUN addgroup -S kafkaui && adduser -S kafkaui -G kafkaui
 
 
 USER kafkaui
 
 

+ 3 - 24
kafka-ui-api/pom.xml

@@ -20,18 +20,6 @@
         <sonar.language>java</sonar.language>
     </properties>
 
-    <dependencyManagement>
-        <dependencies>
-            <dependency>
-                <groupId>org.springframework.boot</groupId>
-                <artifactId>spring-boot-dependencies</artifactId>
-                <version>${spring-boot.version}</version>
-                <type>pom</type>
-                <scope>import</scope>
-            </dependency>
-        </dependencies>
-    </dependencyManagement>
-
     <dependencies>
         <dependency>
             <groupId>org.springframework.boot</groupId>
@@ -57,7 +45,7 @@
         <dependency>
             <groupId>com.provectus</groupId>
             <artifactId>kafka-ui-serde-api</artifactId>
-            <version>${project.version}</version>
+            <version>${kafka-ui-serde-api.version}</version>
         </dependency>
         <dependency>
             <groupId>org.apache.kafka</groupId>
@@ -103,7 +91,7 @@
         <dependency>
             <groupId>software.amazon.msk</groupId>
             <artifactId>aws-msk-iam-auth</artifactId>
-            <version>1.1.3</version>
+            <version>1.1.5</version>
         </dependency>
 
         <dependency>
@@ -140,25 +128,21 @@
         <dependency>
             <groupId>org.testcontainers</groupId>
             <artifactId>testcontainers</artifactId>
-            <version>${test.containers.version}</version>
             <scope>test</scope>
         </dependency>
         <dependency>
             <groupId>org.testcontainers</groupId>
             <artifactId>kafka</artifactId>
-            <version>${test.containers.version}</version>
             <scope>test</scope>
         </dependency>
         <dependency>
             <groupId>org.testcontainers</groupId>
             <artifactId>junit-jupiter</artifactId>
-            <version>${test.containers.version}</version>
             <scope>test</scope>
             <scope>test</scope>
         </dependency>
         </dependency>
         <dependency>
         <dependency>
             <groupId>org.junit.jupiter</groupId>
             <groupId>org.junit.jupiter</groupId>
             <artifactId>junit-jupiter-engine</artifactId>
             <artifactId>junit-jupiter-engine</artifactId>
-            <version>${junit-jupiter-engine.version}</version>
             <scope>test</scope>
             <scope>test</scope>
         </dependency>
         </dependency>
         <dependency>
         <dependency>
@@ -262,10 +246,7 @@
             <plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-compiler-plugin</artifactId>
                 <artifactId>maven-compiler-plugin</artifactId>
-                <version>${maven-compiler-plugin.version}</version>
                 <configuration>
                 <configuration>
-                    <source>${maven.compiler.source}</source>
-                    <target>${maven.compiler.target}</target>
                     <annotationProcessorPaths>
                     <annotationProcessorPaths>
                         <path>
                         <path>
                             <groupId>org.mapstruct</groupId>
                             <groupId>org.mapstruct</groupId>
@@ -293,7 +274,6 @@
             <plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-surefire-plugin</artifactId>
                 <artifactId>maven-surefire-plugin</artifactId>
-                <version>${maven-surefire-plugin.version}</version>
                 <configuration>
                 <configuration>
                     <argLine>@{argLine} --illegal-access=permit</argLine>
                     <argLine>@{argLine} --illegal-access=permit</argLine>
                 </configuration>
                 </configuration>
@@ -326,6 +306,7 @@
                         </configuration>
                         </configuration>
                     </execution>
                     </execution>
                 </executions>
                 </executions>
+
             </plugin>
             </plugin>
             <plugin>
             <plugin>
                 <groupId>org.antlr</groupId>
                 <groupId>org.antlr</groupId>
@@ -400,7 +381,6 @@
                     </plugin>
                     </plugin>
                     <plugin>
                     <plugin>
                         <artifactId>maven-resources-plugin</artifactId>
                         <artifactId>maven-resources-plugin</artifactId>
-                        <version>${maven-resources-plugin.version}</version>
                         <executions>
                         <executions>
                             <execution>
                             <execution>
                                 <id>copy-resources</id>
                                 <id>copy-resources</id>
@@ -495,5 +475,4 @@
         </profile>
         </profile>
     </profiles>
     </profiles>
 
 
-
 </project>
 </project>

+ 0 - 22
kafka-ui-api/src/main/java/com/provectus/kafka/ui/client/KafkaConnectClientsFactory.java

@@ -1,22 +0,0 @@
-package com.provectus.kafka.ui.client;
-
-import com.provectus.kafka.ui.connect.api.KafkaConnectClientApi;
-import com.provectus.kafka.ui.model.KafkaConnectCluster;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import org.springframework.beans.factory.annotation.Value;
-import org.springframework.stereotype.Service;
-import org.springframework.util.unit.DataSize;
-
-@Service
-public class KafkaConnectClientsFactory {
-
-  @Value("${webclient.max-in-memory-buffer-size:20MB}")
-  private DataSize maxBuffSize;
-
-  private final Map<String, KafkaConnectClientApi> cache = new ConcurrentHashMap<>();
-
-  public KafkaConnectClientApi withKafkaConnectConfig(KafkaConnectCluster config) {
-    return cache.computeIfAbsent(config.getAddress(), s -> new RetryingKafkaConnectClient(config, maxBuffSize));
-  }
-}

+ 20 - 47
kafka-ui-api/src/main/java/com/provectus/kafka/ui/client/RetryingKafkaConnectClient.java

@@ -1,33 +1,25 @@
 package com.provectus.kafka.ui.client;
 package com.provectus.kafka.ui.client;
 
 
-import com.fasterxml.jackson.databind.DeserializationFeature;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule;
+import static com.provectus.kafka.ui.config.ClustersProperties.ConnectCluster;
+
 import com.provectus.kafka.ui.connect.ApiClient;
 import com.provectus.kafka.ui.connect.ApiClient;
-import com.provectus.kafka.ui.connect.RFC3339DateFormat;
 import com.provectus.kafka.ui.connect.api.KafkaConnectClientApi;
 import com.provectus.kafka.ui.connect.api.KafkaConnectClientApi;
 import com.provectus.kafka.ui.connect.model.Connector;
 import com.provectus.kafka.ui.connect.model.Connector;
 import com.provectus.kafka.ui.connect.model.NewConnector;
 import com.provectus.kafka.ui.connect.model.NewConnector;
 import com.provectus.kafka.ui.exception.KafkaConnectConflictReponseException;
 import com.provectus.kafka.ui.exception.KafkaConnectConflictReponseException;
 import com.provectus.kafka.ui.exception.ValidationException;
 import com.provectus.kafka.ui.exception.ValidationException;
-import com.provectus.kafka.ui.model.KafkaConnectCluster;
-import java.text.DateFormat;
+import com.provectus.kafka.ui.util.WebClientConfigurator;
 import java.time.Duration;
 import java.time.Duration;
 import java.util.List;
 import java.util.List;
 import java.util.Map;
 import java.util.Map;
-import java.util.TimeZone;
 import lombok.extern.slf4j.Slf4j;
 import lombok.extern.slf4j.Slf4j;
-import org.openapitools.jackson.nullable.JsonNullableModule;
 import org.springframework.core.ParameterizedTypeReference;
 import org.springframework.core.ParameterizedTypeReference;
 import org.springframework.http.HttpHeaders;
 import org.springframework.http.HttpHeaders;
 import org.springframework.http.HttpMethod;
 import org.springframework.http.HttpMethod;
 import org.springframework.http.MediaType;
 import org.springframework.http.MediaType;
-import org.springframework.http.codec.json.Jackson2JsonDecoder;
-import org.springframework.http.codec.json.Jackson2JsonEncoder;
 import org.springframework.util.MultiValueMap;
 import org.springframework.util.MultiValueMap;
 import org.springframework.util.unit.DataSize;
 import org.springframework.util.unit.DataSize;
 import org.springframework.web.client.RestClientException;
 import org.springframework.web.client.RestClientException;
-import org.springframework.web.reactive.function.client.ExchangeStrategies;
 import org.springframework.web.reactive.function.client.WebClient;
 import org.springframework.web.reactive.function.client.WebClient;
 import org.springframework.web.reactive.function.client.WebClientResponseException;
 import org.springframework.web.reactive.function.client.WebClientResponseException;
 import reactor.core.publisher.Flux;
 import reactor.core.publisher.Flux;
@@ -39,7 +31,7 @@ public class RetryingKafkaConnectClient extends KafkaConnectClientApi {
   private static final int MAX_RETRIES = 5;
   private static final int MAX_RETRIES = 5;
   private static final Duration RETRIES_DELAY = Duration.ofMillis(200);
   private static final Duration RETRIES_DELAY = Duration.ofMillis(200);
 
 
-  public RetryingKafkaConnectClient(KafkaConnectCluster config, DataSize maxBuffSize) {
+  public RetryingKafkaConnectClient(ConnectCluster config, DataSize maxBuffSize) {
     super(new RetryingApiClient(config, maxBuffSize));
     super(new RetryingApiClient(config, maxBuffSize));
   }
   }
 
 
@@ -85,46 +77,27 @@ public class RetryingKafkaConnectClient extends KafkaConnectClientApi {
 
 
   private static class RetryingApiClient extends ApiClient {
   private static class RetryingApiClient extends ApiClient {
 
 
-    private static final DateFormat dateFormat = getDefaultDateFormat();
-    private static final ObjectMapper mapper = buildObjectMapper(dateFormat);
-
-    public RetryingApiClient(KafkaConnectCluster config, DataSize maxBuffSize) {
-      super(buildWebClient(mapper, maxBuffSize), mapper, dateFormat);
+    public RetryingApiClient(ConnectCluster config, DataSize maxBuffSize) {
+      super(buildWebClient(maxBuffSize, config), null, null);
       setBasePath(config.getAddress());
       setBasePath(config.getAddress());
       setUsername(config.getUserName());
       setUsername(config.getUserName());
       setPassword(config.getPassword());
       setPassword(config.getPassword());
     }
     }
 
 
-    public static DateFormat getDefaultDateFormat() {
-      DateFormat dateFormat = new RFC3339DateFormat();
-      dateFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
-      return dateFormat;
-    }
-
-    public static WebClient buildWebClient(ObjectMapper mapper, DataSize maxBuffSize) {
-      ExchangeStrategies strategies = ExchangeStrategies
-              .builder()
-              .codecs(clientDefaultCodecsConfigurer -> {
-                clientDefaultCodecsConfigurer.defaultCodecs()
-                        .jackson2JsonEncoder(new Jackson2JsonEncoder(mapper, MediaType.APPLICATION_JSON));
-                clientDefaultCodecsConfigurer.defaultCodecs()
-                        .jackson2JsonDecoder(new Jackson2JsonDecoder(mapper, MediaType.APPLICATION_JSON));
-                clientDefaultCodecsConfigurer.defaultCodecs()
-                        .maxInMemorySize((int) maxBuffSize.toBytes());
-              })
-              .build();
-      WebClient.Builder webClient = WebClient.builder().exchangeStrategies(strategies);
-      return webClient.build();
-    }
-
-    public static ObjectMapper buildObjectMapper(DateFormat dateFormat) {
-      ObjectMapper mapper = new ObjectMapper();
-      mapper.setDateFormat(dateFormat);
-      mapper.registerModule(new JavaTimeModule());
-      mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
-      JsonNullableModule jnm = new JsonNullableModule();
-      mapper.registerModule(jnm);
-      return mapper;
+    public static WebClient buildWebClient(DataSize maxBuffSize, ConnectCluster config) {
+      return new WebClientConfigurator()
+          .configureSsl(
+              config.getKeystoreLocation(),
+              config.getKeystorePassword(),
+              config.getTruststoreLocation(),
+              config.getTruststorePassword()
+          )
+          .configureBasicAuth(
+              config.getUserName(),
+              config.getPassword()
+          )
+          .configureBufferSize(maxBuffSize)
+          .build();
     }
     }
 
 
     @Override
     @Override
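
For context, a minimal `application.yml` sketch of how a Connect cluster with the new SSL-aware client might be configured. Field names follow `ClustersProperties.ConnectCluster` (shown in the next file); the `kafka.clusters` prefix and the example paths are illustrative assumptions, not part of this diff:

```yaml
kafka:
  clusters:
    - name: local
      kafkaConnect:
        - name: my-connect
          address: https://connect:8083
          userName: admin                                        # optional basic auth
          password: admin-secret
          keystoreLocation: /etc/kafkaui/kafka.keystore.jks      # optional client keystore (mTLS)
          keystorePassword: secret
          truststoreLocation: /etc/kafkaui/kafka.truststore.jks  # truststore for the Connect endpoint
          truststorePassword: secret
```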

+ 27 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/ClustersProperties.java

@@ -8,7 +8,10 @@ import java.util.Map;
 import java.util.Properties;
 import java.util.Properties;
 import java.util.Set;
 import java.util.Set;
 import javax.annotation.PostConstruct;
 import javax.annotation.PostConstruct;
+import lombok.AllArgsConstructor;
+import lombok.Builder;
 import lombok.Data;
 import lombok.Data;
+import lombok.NoArgsConstructor;
 import lombok.ToString;
 import lombok.ToString;
 import org.springframework.boot.context.properties.ConfigurationProperties;
 import org.springframework.boot.context.properties.ConfigurationProperties;
 import org.springframework.context.annotation.Configuration;
 import org.springframework.context.annotation.Configuration;
@@ -30,6 +33,7 @@ public class ClustersProperties {
     WebClientSsl schemaRegistrySsl;
     WebClientSsl schemaRegistrySsl;
     String ksqldbServer;
     String ksqldbServer;
     KsqldbServerAuth ksqldbServerAuth;
     KsqldbServerAuth ksqldbServerAuth;
+    WebClientSsl ksqldbServerSsl;
     List<ConnectCluster> kafkaConnect;
     List<ConnectCluster> kafkaConnect;
     MetricsConfigData metrics;
     MetricsConfigData metrics;
     Properties properties;
     Properties properties;
@@ -38,6 +42,8 @@ public class ClustersProperties {
     List<SerdeConfig> serde = new ArrayList<>();
     List<SerdeConfig> serde = new ArrayList<>();
     String defaultKeySerde;
     String defaultKeySerde;
     String defaultValueSerde;
     String defaultValueSerde;
+    List<Masking> masking = new ArrayList<>();
+    long pollingThrottleRate = 0;
   }
   }
 
 
   @Data
   @Data
@@ -50,11 +56,18 @@ public class ClustersProperties {
   }
   }
 
 
   @Data
   @Data
+  @NoArgsConstructor
+  @AllArgsConstructor
+  @Builder(toBuilder = true)
   public static class ConnectCluster {
   public static class ConnectCluster {
     String name;
     String name;
     String address;
     String address;
     String userName;
     String userName;
     String password;
     String password;
+    String keystoreLocation;
+    String keystorePassword;
+    String truststoreLocation;
+    String truststorePassword;
   }
   }
 
 
   @Data
   @Data
@@ -88,6 +101,20 @@ public class ClustersProperties {
     String password;
     String password;
   }
   }
 
 
+  @Data
+  public static class Masking {
+    Type type;
+    List<String> fields = List.of(); //if empty - policy will be applied to all fields
+    List<String> pattern = List.of("X", "x", "n", "-"); //used when type=MASK
+    String replacement = "***DATA_MASKED***"; //used when type=REPLACE
+    String topicKeysPattern;
+    String topicValuesPattern;
+
+    public enum Type {
+      REMOVE, MASK, REPLACE
+    }
+  }
+
   @PostConstruct
   @PostConstruct
   public void validateAndSetDefaults() {
   public void validateAndSetDefaults() {
     validateClusterNames();
     validateClusterNames();
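
As a rough illustration of the new per-cluster `masking` and `pollingThrottleRate` properties (field names and defaults taken from the `Masking` class above; the `kafka.clusters` prefix and the topic patterns are assumptions — see the added `documentation/guides/DataMasking.md` for the full guide):

```yaml
kafka:
  clusters:
    - name: local
      pollingThrottleRate: 500            # polling throttle rate; defaults to 0
      masking:
        - type: MASK                      # mask characters using `pattern`
          fields: [ "ssn", "cardNumber" ] # empty list = apply the policy to all fields
          pattern: [ "X", "x", "n", "-" ]
          topicValuesPattern: "payments-.*"
        - type: REPLACE                   # replace the whole field value
          replacement: "***DATA_MASKED***"
          topicKeysPattern: "users"
        - type: REMOVE                    # drop the field entirely
          fields: [ "password" ]
          topicValuesPattern: ".*"
```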

+ 8 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/AuthenticatedUser.java

@@ -0,0 +1,8 @@
+package com.provectus.kafka.ui.config.auth;
+
+import java.util.Collection;
+import lombok.Value;
+
+public record AuthenticatedUser(String principal, Collection<String> groups) {
+
+}

+ 0 - 80
kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/CognitoOAuthSecurityConfig.java

@@ -1,80 +0,0 @@
-package com.provectus.kafka.ui.config.auth;
-
-import com.provectus.kafka.ui.config.CognitoOidcLogoutSuccessHandler;
-import com.provectus.kafka.ui.config.auth.props.CognitoProperties;
-import java.util.Optional;
-import lombok.RequiredArgsConstructor;
-import lombok.extern.slf4j.Slf4j;
-import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
-import org.springframework.boot.context.properties.ConfigurationProperties;
-import org.springframework.context.annotation.Bean;
-import org.springframework.context.annotation.Configuration;
-import org.springframework.security.config.annotation.web.reactive.EnableWebFluxSecurity;
-import org.springframework.security.config.web.server.ServerHttpSecurity;
-import org.springframework.security.oauth2.client.registration.ClientRegistration;
-import org.springframework.security.oauth2.client.registration.ClientRegistrations;
-import org.springframework.security.oauth2.client.registration.InMemoryReactiveClientRegistrationRepository;
-import org.springframework.security.web.server.SecurityWebFilterChain;
-import org.springframework.security.web.server.authentication.logout.ServerLogoutSuccessHandler;
-
-@Configuration
-@EnableWebFluxSecurity
-@ConditionalOnProperty(value = "auth.type", havingValue = "OAUTH2_COGNITO")
-@RequiredArgsConstructor
-@Slf4j
-public class CognitoOAuthSecurityConfig extends AbstractAuthSecurityConfig {
-
-  private static final String COGNITO = "cognito";
-
-  @Bean
-  public SecurityWebFilterChain configure(ServerHttpSecurity http, CognitoProperties props) {
-    log.info("Configuring Cognito OAUTH2 authentication.");
-
-    String clientId = props.getClientId();
-    String logoutUrl = props.getLogoutUri();
-
-    final ServerLogoutSuccessHandler logoutHandler = new CognitoOidcLogoutSuccessHandler(logoutUrl, clientId);
-
-    return http.authorizeExchange()
-        .pathMatchers(AUTH_WHITELIST)
-        .permitAll()
-        .anyExchange()
-        .authenticated()
-
-        .and()
-        .oauth2Login()
-
-        .and()
-        .oauth2Client()
-
-        .and()
-        .logout()
-        .logoutSuccessHandler(logoutHandler)
-
-        .and()
-        .csrf().disable()
-        .build();
-  }
-
-  @Bean
-  public InMemoryReactiveClientRegistrationRepository clientRegistrationRepository(CognitoProperties props) {
-    ClientRegistration.Builder builder = ClientRegistrations
-        .fromIssuerLocation(props.getIssuerUri())
-        .registrationId(COGNITO);
-
-    builder.clientId(props.getClientId());
-    builder.clientSecret(props.getClientSecret());
-
-    Optional.ofNullable(props.getScope()).ifPresent(builder::scope);
-    Optional.ofNullable(props.getUserNameAttribute()).ifPresent(builder::userNameAttributeName);
-
-    return new InMemoryReactiveClientRegistrationRepository(builder.build());
-  }
-
-  @Bean
-  @ConfigurationProperties("auth.cognito")
-  public CognitoProperties cognitoProperties() {
-    return new CognitoProperties();
-  }
-
-}

+ 44 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/OAuthProperties.java

@@ -0,0 +1,44 @@
+package com.provectus.kafka.ui.config.auth;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import javax.annotation.PostConstruct;
+import lombok.Data;
+import org.springframework.boot.context.properties.ConfigurationProperties;
+import org.springframework.util.Assert;
+
+@ConfigurationProperties("auth.oauth2")
+@Data
+public class OAuthProperties {
+  private Map<String, OAuth2Provider> client = new HashMap<>();
+
+  @PostConstruct
+  public void validate() {
+    getClient().values().forEach(this::validateProvider);
+  }
+
+  private void validateProvider(final OAuth2Provider provider) {
+    Assert.hasText(provider.getClientId(), "Client id must not be empty.");
+    Assert.hasText(provider.getProvider(), "Provider name must not be empty");
+  }
+
+  @Data
+  public static class OAuth2Provider {
+    private String provider;
+    private String clientId;
+    private String clientSecret;
+    private String clientName;
+    private String redirectUri;
+    private String authorizationGrantType;
+    private Set<String> scope = new HashSet<>();
+    private String issuerUri;
+    private String authorizationUri;
+    private String tokenUri;
+    private String userInfoUri;
+    private String jwkSetUri;
+    private String userNameAttribute;
+    private Map<String, String> customParams = new HashMap<>();
+  }
+}
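
A minimal sketch of how these properties bind. The registration key `myProvider`, the issuer URL, and the client id/secret are placeholders; field names mirror `OAuth2Provider` above, and `provider` should name a supported type (e.g. `google` or `cognito`, both of which appear in this changeset):

```yaml
auth:
  type: OAUTH2
  oauth2:
    client:
      myProvider:                      # arbitrary registration id
        provider: <provider-type>      # required; matched by extractors/logout handlers
        clientId: <client-id>          # required
        clientSecret: <client-secret>
        clientName: My SSO
        scope: [ openid ]
        issuerUri: https://sso.example.com/
        userNameAttribute: preferred_username
        customParams: { }              # provider-specific extras (see the converter below)
```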

+ 77 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/OAuthPropertiesConverter.java

@@ -0,0 +1,77 @@
+package com.provectus.kafka.ui.config.auth;
+
+import static com.provectus.kafka.ui.config.auth.OAuthProperties.OAuth2Provider;
+import static org.springframework.boot.autoconfigure.security.oauth2.client.OAuth2ClientProperties.Provider;
+import static org.springframework.boot.autoconfigure.security.oauth2.client.OAuth2ClientProperties.Registration;
+
+import lombok.AccessLevel;
+import lombok.NoArgsConstructor;
+import org.apache.commons.lang3.StringUtils;
+import org.springframework.boot.autoconfigure.security.oauth2.client.OAuth2ClientProperties;
+import org.springframework.security.config.oauth2.client.CommonOAuth2Provider;
+
+@NoArgsConstructor(access = AccessLevel.PRIVATE)
+public final class OAuthPropertiesConverter {
+
+  private static final String TYPE = "type";
+  private static final String GOOGLE = "google";
+  public static final String DUMMY = "dummy";
+
+  public static OAuth2ClientProperties convertProperties(final OAuthProperties properties) {
+    final var result = new OAuth2ClientProperties();
+    properties.getClient().forEach((key, provider) -> {
+      var registration = new Registration();
+      registration.setClientId(provider.getClientId());
+      registration.setClientSecret(provider.getClientSecret());
+      registration.setClientName(provider.getClientName());
+      registration.setScope(provider.getScope());
+      registration.setRedirectUri(provider.getRedirectUri());
+      registration.setAuthorizationGrantType(provider.getAuthorizationGrantType());
+
+      result.getRegistration().put(key, registration);
+
+      var clientProvider = new Provider();
+      applyCustomTransformations(provider);
+
+      clientProvider.setAuthorizationUri(provider.getAuthorizationUri());
+      clientProvider.setIssuerUri(provider.getIssuerUri());
+      clientProvider.setJwkSetUri(provider.getJwkSetUri());
+      clientProvider.setTokenUri(provider.getTokenUri());
+      clientProvider.setUserInfoUri(provider.getUserInfoUri());
+      clientProvider.setUserNameAttribute(provider.getUserNameAttribute());
+
+      result.getProvider().put(key, clientProvider);
+    });
+    return result;
+  }
+
+  private static void applyCustomTransformations(OAuth2Provider provider) {
+    applyGoogleTransformations(provider);
+  }
+
+  private static void applyGoogleTransformations(OAuth2Provider provider) {
+    if (!isGoogle(provider)) {
+      return;
+    }
+
+    String allowedDomain = provider.getCustomParams().get("allowedDomain");
+    if (StringUtils.isEmpty(allowedDomain)) {
+      return;
+    }
+
+    String authorizationUri = CommonOAuth2Provider.GOOGLE
+        .getBuilder(DUMMY)
+        .clientId(DUMMY)
+        .build()
+        .getProviderDetails()
+        .getAuthorizationUri();
+
+    final String newUri = authorizationUri + "?hd=" + allowedDomain;
+    provider.setAuthorizationUri(newUri);
+  }
+
+  private static boolean isGoogle(OAuth2Provider provider) {
+    return GOOGLE.equalsIgnoreCase(provider.getCustomParams().get(TYPE));
+  }
+}
+
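
The Google-specific transformation above only applies when `customParams.type` is `google` and an `allowedDomain` is set; it appends `?hd=<domain>` to Google's authorization URI. An illustrative configuration (client id/secret are placeholders):

```yaml
auth:
  oauth2:
    client:
      google:
        provider: google
        clientId: <google-client-id>
        clientSecret: <google-client-secret>
        customParams:
          type: google
          allowedDomain: example.com   # restricts sign-in to this Google Workspace domain via ?hd=
```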

+ 101 - 36
kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/OAuthSecurityConfig.java

@@ -1,66 +1,131 @@
 package com.provectus.kafka.ui.config.auth;
 package com.provectus.kafka.ui.config.auth;
 
 
-import lombok.AllArgsConstructor;
+import com.provectus.kafka.ui.config.auth.logout.OAuthLogoutSuccessHandler;
+import com.provectus.kafka.ui.service.rbac.AccessControlService;
+import com.provectus.kafka.ui.service.rbac.extractor.ProviderAuthorityExtractor;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import lombok.RequiredArgsConstructor;
 import lombok.extern.log4j.Log4j2;
 import lombok.extern.log4j.Log4j2;
+import org.jetbrains.annotations.Nullable;
 import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
 import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
-import org.springframework.context.ApplicationContext;
+import org.springframework.boot.autoconfigure.security.oauth2.client.OAuth2ClientProperties;
+import org.springframework.boot.autoconfigure.security.oauth2.client.OAuth2ClientPropertiesRegistrationAdapter;
+import org.springframework.boot.context.properties.EnableConfigurationProperties;
 import org.springframework.context.annotation.Bean;
 import org.springframework.context.annotation.Bean;
 import org.springframework.context.annotation.Configuration;
 import org.springframework.context.annotation.Configuration;
+import org.springframework.security.config.annotation.method.configuration.EnableReactiveMethodSecurity;
 import org.springframework.security.config.annotation.web.reactive.EnableWebFluxSecurity;
 import org.springframework.security.config.annotation.web.reactive.EnableWebFluxSecurity;
 import org.springframework.security.config.web.server.ServerHttpSecurity;
 import org.springframework.security.config.web.server.ServerHttpSecurity;
+import org.springframework.security.oauth2.client.oidc.userinfo.OidcReactiveOAuth2UserService;
+import org.springframework.security.oauth2.client.oidc.userinfo.OidcUserRequest;
+import org.springframework.security.oauth2.client.oidc.web.server.logout.OidcClientInitiatedServerLogoutSuccessHandler;
+import org.springframework.security.oauth2.client.registration.ClientRegistration;
+import org.springframework.security.oauth2.client.registration.InMemoryReactiveClientRegistrationRepository;
+import org.springframework.security.oauth2.client.registration.ReactiveClientRegistrationRepository;
+import org.springframework.security.oauth2.client.userinfo.DefaultReactiveOAuth2UserService;
+import org.springframework.security.oauth2.client.userinfo.OAuth2UserRequest;
+import org.springframework.security.oauth2.client.userinfo.ReactiveOAuth2UserService;
+import org.springframework.security.oauth2.core.oidc.user.OidcUser;
+import org.springframework.security.oauth2.core.user.OAuth2User;
 import org.springframework.security.web.server.SecurityWebFilterChain;
 import org.springframework.security.web.server.SecurityWebFilterChain;
-import org.springframework.util.ClassUtils;
+import org.springframework.security.web.server.authentication.logout.ServerLogoutSuccessHandler;
+import reactor.core.publisher.Mono;
 
 
 @Configuration
 @Configuration
-@EnableWebFluxSecurity
 @ConditionalOnProperty(value = "auth.type", havingValue = "OAUTH2")
 @ConditionalOnProperty(value = "auth.type", havingValue = "OAUTH2")
-@AllArgsConstructor
+@EnableConfigurationProperties(OAuthProperties.class)
+@EnableWebFluxSecurity
+@EnableReactiveMethodSecurity
+@RequiredArgsConstructor
 @Log4j2
 @Log4j2
 public class OAuthSecurityConfig extends AbstractAuthSecurityConfig {
 public class OAuthSecurityConfig extends AbstractAuthSecurityConfig {
 
 
-  public static final String REACTIVE_CLIENT_REGISTRATION_REPOSITORY_CLASSNAME =
-      "org.springframework.security.oauth2.client.registration."
-          + "ReactiveClientRegistrationRepository";
-
-  private static final boolean IS_OAUTH2_PRESENT = ClassUtils.isPresent(
-      REACTIVE_CLIENT_REGISTRATION_REPOSITORY_CLASSNAME,
-      OAuthSecurityConfig.class.getClassLoader()
-  );
-
-  private final ApplicationContext context;
+  private final OAuthProperties properties;
 
 
   @Bean
   @Bean
-  public SecurityWebFilterChain configure(ServerHttpSecurity http) {
+  public SecurityWebFilterChain configure(ServerHttpSecurity http, OAuthLogoutSuccessHandler logoutHandler) {
     log.info("Configuring OAUTH2 authentication.");
     log.info("Configuring OAUTH2 authentication.");
-    http.authorizeExchange()
+
+    return http.authorizeExchange()
         .pathMatchers(AUTH_WHITELIST)
         .pathMatchers(AUTH_WHITELIST)
         .permitAll()
         .permitAll()
         .anyExchange()
         .anyExchange()
-        .authenticated();
+        .authenticated()
+
+        .and()
+        .oauth2Login()
+
+        .and()
+        .logout()
+        .logoutSuccessHandler(logoutHandler)
+
+        .and()
+        .csrf().disable()
+        .build();
+  }
+
+  @Bean
+  public ReactiveOAuth2UserService<OidcUserRequest, OidcUser> customOidcUserService(AccessControlService acs) {
+    final OidcReactiveOAuth2UserService delegate = new OidcReactiveOAuth2UserService();
+    return request -> delegate.loadUser(request)
+        .flatMap(user -> {
+          String providerId = request.getClientRegistration().getRegistrationId();
+          final var extractor = getExtractor(providerId, acs);
+          if (extractor == null) {
+            return Mono.just(user);
+          }
 
 
-    if (IS_OAUTH2_PRESENT && OAuth2ClasspathGuard.shouldConfigure(this.context)) {
-      OAuth2ClasspathGuard.configure(http);
-    }
+          return extractor.extract(acs, user, Map.of("request", request))
+              .map(groups -> new RbacOidcUser(user, groups));
+        });
+  }
+
+  @Bean
+  public ReactiveOAuth2UserService<OAuth2UserRequest, OAuth2User> customOauth2UserService(AccessControlService acs) {
+    final DefaultReactiveOAuth2UserService delegate = new DefaultReactiveOAuth2UserService();
+    return request -> delegate.loadUser(request)
+        .flatMap(user -> {
+          String providerId = request.getClientRegistration().getRegistrationId();
+          final var extractor = getExtractor(providerId, acs);
+          if (extractor == null) {
+            return Mono.just(user);
+          }
 
 
-    return http.csrf().disable().build();
+          return extractor.extract(acs, user, Map.of("request", request))
+              .map(groups -> new RbacOAuth2User(user, groups));
+        });
   }
   }
 
 
-  private static class OAuth2ClasspathGuard {
-    static void configure(ServerHttpSecurity http) {
-      http
-          .oauth2Login()
-          .and()
-          .oauth2Client();
-    }
-
-    static boolean shouldConfigure(ApplicationContext context) {
-      ClassLoader loader = context.getClassLoader();
-      Class<?> reactiveClientRegistrationRepositoryClass =
-          ClassUtils.resolveClassName(REACTIVE_CLIENT_REGISTRATION_REPOSITORY_CLASSNAME, loader);
-      return context.getBeanNamesForType(reactiveClientRegistrationRepositoryClass).length == 1;
-    }
+  @Bean
+  public InMemoryReactiveClientRegistrationRepository clientRegistrationRepository() {
+    final OAuth2ClientProperties props = OAuthPropertiesConverter.convertProperties(properties);
+    final List<ClientRegistration> registrations =
+        new ArrayList<>(OAuth2ClientPropertiesRegistrationAdapter.getClientRegistrations(props).values());
+    return new InMemoryReactiveClientRegistrationRepository(registrations);
   }
   }
 
 
+  @Bean
+  public ServerLogoutSuccessHandler defaultOidcLogoutHandler(final ReactiveClientRegistrationRepository repository) {
+    return new OidcClientInitiatedServerLogoutSuccessHandler(repository);
+  }
+
+  @Nullable
+  private ProviderAuthorityExtractor getExtractor(final String providerId, AccessControlService acs) {
+    final String provider = getProviderByProviderId(providerId);
+    Optional<ProviderAuthorityExtractor> extractor = acs.getExtractors()
+        .stream()
+        .filter(e -> e.isApplicable(provider))
+        .findFirst();
+
+    return extractor.orElse(null);
+  }
+
+  private String getProviderByProviderId(final String providerId) {
+    return properties.getClient().get(providerId).getProvider();
+  }
 
 
 }
 }
 
 

+ 30 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/RbacOAuth2User.java

@@ -0,0 +1,30 @@
+package com.provectus.kafka.ui.config.auth;
+
+import java.util.Collection;
+import java.util.Map;
+import lombok.Value;
+import org.springframework.security.core.GrantedAuthority;
+import org.springframework.security.oauth2.core.user.OAuth2User;
+
+public record RbacOAuth2User(OAuth2User user, Collection<String> groups) implements RbacUser, OAuth2User {
+
+  @Override
+  public Map<String, Object> getAttributes() {
+    return user.getAttributes();
+  }
+
+  @Override
+  public Collection<? extends GrantedAuthority> getAuthorities() {
+    return user.getAuthorities();
+  }
+
+  @Override
+  public String getName() {
+    return user.getName();
+  }
+
+  @Override
+  public String name() {
+    return user.getName();
+  }
+}

+ 47 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/RbacOidcUser.java

@@ -0,0 +1,47 @@
+package com.provectus.kafka.ui.config.auth;
+
+import java.util.Collection;
+import java.util.Map;
+import lombok.Value;
+import org.springframework.security.core.GrantedAuthority;
+import org.springframework.security.oauth2.core.oidc.OidcIdToken;
+import org.springframework.security.oauth2.core.oidc.OidcUserInfo;
+import org.springframework.security.oauth2.core.oidc.user.OidcUser;
+
+public record RbacOidcUser(OidcUser user, Collection<String> groups) implements RbacUser, OidcUser {
+
+  @Override
+  public Map<String, Object> getClaims() {
+    return user.getClaims();
+  }
+
+  @Override
+  public OidcUserInfo getUserInfo() {
+    return user.getUserInfo();
+  }
+
+  @Override
+  public OidcIdToken getIdToken() {
+    return user.getIdToken();
+  }
+
+  @Override
+  public Map<String, Object> getAttributes() {
+    return user.getAttributes();
+  }
+
+  @Override
+  public Collection<? extends GrantedAuthority> getAuthorities() {
+    return user.getAuthorities();
+  }
+
+  @Override
+  public String getName() {
+    return user.getName();
+  }
+
+  @Override
+  public String name() {
+    return user.getName();
+  }
+}

+ 10 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/RbacUser.java

@@ -0,0 +1,10 @@
+package com.provectus.kafka.ui.config.auth;
+
+import java.util.Collection;
+
+public interface RbacUser {
+  String name();
+
+  Collection<String> groups();
+
+}

+ 23 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/RoleBasedAccessControlProperties.java

@@ -0,0 +1,23 @@
+package com.provectus.kafka.ui.config.auth;
+
+import com.provectus.kafka.ui.model.rbac.Role;
+import java.util.ArrayList;
+import java.util.List;
+import javax.annotation.PostConstruct;
+import org.springframework.boot.context.properties.ConfigurationProperties;
+
+@ConfigurationProperties("rbac")
+public class RoleBasedAccessControlProperties {
+
+  private final List<Role> roles = new ArrayList<>();
+
+  @PostConstruct
+  public void init() {
+    roles.forEach(Role::validate);
+  }
+
+  public List<Role> getRoles() {
+    return roles;
+  }
+
+}
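
A rough shape of the `rbac` properties (role and permission field names are inferred from the `Role`/`Permission` accessors used in `AccessController` below; the subject bindings are omitted because their structure is not part of this diff):

```yaml
rbac:
  roles:
    - name: "admins"
      clusters:
        - local
      permissions:
        - resource: topic        # maps to ResourceTypeDTO
          value: ".*"
          actions: [ VIEW, EDIT ]
```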

+ 13 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/condition/CognitoCondition.java

@@ -0,0 +1,13 @@
+package com.provectus.kafka.ui.config.auth.condition;
+
+import com.provectus.kafka.ui.service.rbac.AbstractProviderCondition;
+import org.springframework.context.annotation.Condition;
+import org.springframework.context.annotation.ConditionContext;
+import org.springframework.core.type.AnnotatedTypeMetadata;
+
+public class CognitoCondition extends AbstractProviderCondition implements Condition {
+  @Override
+  public boolean matches(final ConditionContext context, final AnnotatedTypeMetadata metadata) {
+    return getRegisteredProvidersTypes(context.getEnvironment()).stream().anyMatch(a -> a.equalsIgnoreCase("cognito"));
+  }
+}

+ 18 - 10
kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/CognitoOidcLogoutSuccessHandler.java → kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/logout/CognitoLogoutSuccessHandler.java

@@ -1,27 +1,34 @@
-package com.provectus.kafka.ui.config;
+package com.provectus.kafka.ui.config.auth.logout;
 
 
+import com.provectus.kafka.ui.config.auth.OAuthProperties;
+import com.provectus.kafka.ui.config.auth.condition.CognitoCondition;
+import com.provectus.kafka.ui.model.rbac.provider.Provider;
 import java.net.URI;
 import java.net.URI;
 import java.nio.charset.StandardCharsets;
 import java.nio.charset.StandardCharsets;
-import lombok.RequiredArgsConstructor;
+import org.springframework.context.annotation.Conditional;
 import org.springframework.http.HttpStatus;
 import org.springframework.http.HttpStatus;
 import org.springframework.http.server.reactive.ServerHttpResponse;
 import org.springframework.http.server.reactive.ServerHttpResponse;
 import org.springframework.security.core.Authentication;
 import org.springframework.security.core.Authentication;
 import org.springframework.security.web.server.WebFilterExchange;
 import org.springframework.security.web.server.WebFilterExchange;
-import org.springframework.security.web.server.authentication.logout.ServerLogoutSuccessHandler;
 import org.springframework.security.web.util.UrlUtils;
 import org.springframework.security.web.util.UrlUtils;
+import org.springframework.stereotype.Component;
 import org.springframework.web.server.WebSession;
 import org.springframework.web.server.WebSession;
 import org.springframework.web.util.UriComponents;
 import org.springframework.web.util.UriComponents;
 import org.springframework.web.util.UriComponentsBuilder;
 import org.springframework.web.util.UriComponentsBuilder;
 import reactor.core.publisher.Mono;
 import reactor.core.publisher.Mono;
 
 
-@RequiredArgsConstructor
-public class CognitoOidcLogoutSuccessHandler implements ServerLogoutSuccessHandler {
+@Component
+@Conditional(CognitoCondition.class)
+public class CognitoLogoutSuccessHandler implements LogoutSuccessHandler {
 
 
-  private final String logoutUrl;
-  private final String clientId;
+  @Override
+  public boolean isApplicable(String provider) {
+    return Provider.Name.COGNITO.equalsIgnoreCase(provider);
+  }
 
 
   @Override
   @Override
-  public Mono<Void> onLogoutSuccess(final WebFilterExchange exchange, final Authentication authentication) {
+  public Mono<Void> handle(WebFilterExchange exchange, Authentication authentication,
+                           OAuthProperties.OAuth2Provider provider) {
     final ServerHttpResponse response = exchange.getExchange().getResponse();
     final ServerHttpResponse response = exchange.getExchange().getResponse();
     response.setStatusCode(HttpStatus.FOUND);
     response.setStatusCode(HttpStatus.FOUND);
 
 
@@ -39,8 +46,8 @@ public class CognitoOidcLogoutSuccessHandler implements ServerLogoutSuccessHandl
         .build();
         .build();
 
 
     final var uri = UriComponentsBuilder
     final var uri = UriComponentsBuilder
-        .fromUri(URI.create(logoutUrl))
-        .queryParam("client_id", clientId)
+        .fromUri(URI.create(provider.getCustomParams().get("logoutUrl")))
+        .queryParam("client_id", provider.getClientId())
         .queryParam("logout_uri", baseUrl)
         .queryParam("logout_uri", baseUrl)
         .encode(StandardCharsets.UTF_8)
         .encode(StandardCharsets.UTF_8)
         .build()
         .build()
@@ -49,5 +56,6 @@ public class CognitoOidcLogoutSuccessHandler implements ServerLogoutSuccessHandl
     response.getHeaders().setLocation(uri);
     response.getHeaders().setLocation(uri);
     return exchange.getExchange().getSession().flatMap(WebSession::invalidate);
     return exchange.getExchange().getSession().flatMap(WebSession::invalidate);
   }
   }
+
 }
 }
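
Since the logout URL and client id now come from the generic provider config rather than the removed Cognito-specific properties, a Cognito registration might look like the sketch below. Pool domain, ids, and region are placeholders; `provider: cognito` is what `isApplicable` matches, and `customParams.type`/`customParams.logoutUrl` drive the Cognito condition and the redirect above:

```yaml
auth:
  type: OAUTH2
  oauth2:
    client:
      cognito:
        provider: cognito
        clientId: <cognito-client-id>
        clientSecret: <cognito-client-secret>
        scope: [ openid ]
        issuerUri: https://cognito-idp.<region>.amazonaws.com/<user-pool-id>
        customParams:
          type: cognito
          logoutUrl: https://<pool-domain>.auth.<region>.amazoncognito.com/logout
```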
 
 

+ 15 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/logout/LogoutSuccessHandler.java

@@ -0,0 +1,15 @@
+package com.provectus.kafka.ui.config.auth.logout;
+
+import com.provectus.kafka.ui.config.auth.OAuthProperties;
+import org.springframework.security.core.Authentication;
+import org.springframework.security.web.server.WebFilterExchange;
+import reactor.core.publisher.Mono;
+
+public interface LogoutSuccessHandler {
+
+  boolean isApplicable(final String provider);
+
+  Mono<Void> handle(final WebFilterExchange exchange,
+                    final Authentication authentication,
+                    final OAuthProperties.OAuth2Provider provider);
+}

+ 46 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/logout/OAuthLogoutSuccessHandler.java

@@ -0,0 +1,46 @@
+package com.provectus.kafka.ui.config.auth.logout;
+
+import com.provectus.kafka.ui.config.auth.OAuthProperties;
+import java.util.List;
+import java.util.Optional;
+import org.springframework.beans.factory.annotation.Qualifier;
+import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
+import org.springframework.security.core.Authentication;
+import org.springframework.security.oauth2.client.authentication.OAuth2AuthenticationToken;
+import org.springframework.security.web.server.WebFilterExchange;
+import org.springframework.security.web.server.authentication.logout.ServerLogoutSuccessHandler;
+import org.springframework.stereotype.Component;
+import reactor.core.publisher.Mono;
+
+@Component
+@ConditionalOnProperty(value = "auth.type", havingValue = "OAUTH2")
+public class OAuthLogoutSuccessHandler implements ServerLogoutSuccessHandler {
+  private final OAuthProperties properties;
+  private final List<LogoutSuccessHandler> logoutSuccessHandlers;
+  private final ServerLogoutSuccessHandler defaultOidcLogoutHandler;
+
+  public OAuthLogoutSuccessHandler(final OAuthProperties properties,
+                                   final List<LogoutSuccessHandler> logoutSuccessHandlers,
+                                   final @Qualifier("defaultOidcLogoutHandler") ServerLogoutSuccessHandler handler) {
+    this.properties = properties;
+    this.logoutSuccessHandlers = logoutSuccessHandlers;
+    this.defaultOidcLogoutHandler = handler;
+  }
+
+  @Override
+  public Mono<Void> onLogoutSuccess(final WebFilterExchange exchange,
+                                    final Authentication authentication) {
+    final OAuth2AuthenticationToken oauthToken = (OAuth2AuthenticationToken) authentication;
+    final String providerId = oauthToken.getAuthorizedClientRegistrationId();
+    final OAuthProperties.OAuth2Provider oAuth2Provider = properties.getClient().get(providerId);
+    return getLogoutHandler(oAuth2Provider.getProvider())
+        .map(handler -> handler.handle(exchange, authentication, oAuth2Provider))
+        .orElseGet(() -> defaultOidcLogoutHandler.onLogoutSuccess(exchange, authentication));
+  }
+
+  private Optional<LogoutSuccessHandler> getLogoutHandler(final String provider) {
+    return logoutSuccessHandlers.stream()
+        .filter(h -> h.isApplicable(provider))
+        .findFirst();
+  }
+}

+ 0 - 44
kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/props/CognitoProperties.java

@@ -1,44 +0,0 @@
-package com.provectus.kafka.ui.config.auth.props;
-
-import lombok.Data;
-import lombok.ToString;
-import org.jetbrains.annotations.Nullable;
-
-@Data
-@ToString(exclude = "clientSecret")
-public class CognitoProperties {
-
-  String clientId;
-  String logoutUri;
-  String issuerUri;
-  String clientSecret;
-  @Nullable
-  String scope;
-  @Nullable
-  String userNameAttribute;
-
-  public String getClientId() {
-    return clientId;
-  }
-
-  public String getLogoutUri() {
-    return logoutUri;
-  }
-
-  public String getIssuerUri() {
-    return issuerUri;
-  }
-
-  public String getClientSecret() {
-    return clientSecret;
-  }
-
-  public @Nullable String getScope() {
-    return scope;
-  }
-
-  public @Nullable String getUserNameAttribute() {
-    return userNameAttribute;
-  }
-
-}

+ 80 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/AccessController.java

@@ -0,0 +1,80 @@
+package com.provectus.kafka.ui.controller;
+
+import com.provectus.kafka.ui.api.AuthorizationApi;
+import com.provectus.kafka.ui.model.ActionDTO;
+import com.provectus.kafka.ui.model.AuthenticationInfoDTO;
+import com.provectus.kafka.ui.model.ResourceTypeDTO;
+import com.provectus.kafka.ui.model.UserInfoDTO;
+import com.provectus.kafka.ui.model.UserPermissionDTO;
+import com.provectus.kafka.ui.model.rbac.Permission;
+import com.provectus.kafka.ui.service.rbac.AccessControlService;
+import java.security.Principal;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.stream.Collectors;
+import lombok.RequiredArgsConstructor;
+import org.springframework.http.ResponseEntity;
+import org.springframework.security.core.context.ReactiveSecurityContextHolder;
+import org.springframework.security.core.context.SecurityContext;
+import org.springframework.web.bind.annotation.RestController;
+import org.springframework.web.server.ServerWebExchange;
+import reactor.core.publisher.Mono;
+
+@RestController
+@RequiredArgsConstructor
+public class AccessController implements AuthorizationApi {
+
+  private final AccessControlService accessControlService;
+
+  public Mono<ResponseEntity<AuthenticationInfoDTO>> getUserAuthInfo(ServerWebExchange exchange) {
+    AuthenticationInfoDTO dto = new AuthenticationInfoDTO();
+    dto.setRbacEnabled(accessControlService.isRbacEnabled());
+    UserInfoDTO userInfo = new UserInfoDTO();
+
+    Mono<List<UserPermissionDTO>> permissions = accessControlService.getUser()
+        .map(user -> accessControlService.getRoles()
+            .stream()
+            .filter(role -> user.groups().contains(role.getName()))
+            .map(role -> mapPermissions(role.getPermissions(), role.getClusters()))
+            .flatMap(Collection::stream)
+            .collect(Collectors.toList())
+        )
+        .switchIfEmpty(Mono.just(Collections.emptyList()));
+
+    Mono<String> userName = ReactiveSecurityContextHolder.getContext()
+        .map(SecurityContext::getAuthentication)
+        .map(Principal::getName);
+
+    return userName
+        .zipWith(permissions)
+        .map(data -> {
+          userInfo.setUsername(data.getT1());
+          userInfo.setPermissions(data.getT2());
+
+          dto.setUserInfo(userInfo);
+          return dto;
+        })
+        .switchIfEmpty(Mono.just(dto))
+        .map(ResponseEntity::ok);
+  }
+
+  private List<UserPermissionDTO> mapPermissions(List<Permission> permissions, List<String> clusters) {
+    return permissions
+        .stream()
+        .map(permission -> {
+          UserPermissionDTO dto = new UserPermissionDTO();
+          dto.setClusters(clusters);
+          dto.setResource(ResourceTypeDTO.fromValue(permission.getResource().toString().toUpperCase()));
+          dto.setValue(permission.getValue() != null ? permission.getValue().toString() : null);
+          dto.setActions(permission.getActions()
+              .stream()
+              .map(String::toUpperCase)
+              .map(ActionDTO::valueOf)
+              .collect(Collectors.toList()));
+          return dto;
+        })
+        .collect(Collectors.toList());
+  }
+
+}

+ 68 - 26
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/BrokersController.java

@@ -8,7 +8,10 @@ import com.provectus.kafka.ui.model.BrokerDTO;
 import com.provectus.kafka.ui.model.BrokerLogdirUpdateDTO;
 import com.provectus.kafka.ui.model.BrokerLogdirUpdateDTO;
 import com.provectus.kafka.ui.model.BrokerMetricsDTO;
 import com.provectus.kafka.ui.model.BrokerMetricsDTO;
 import com.provectus.kafka.ui.model.BrokersLogdirsDTO;
 import com.provectus.kafka.ui.model.BrokersLogdirsDTO;
+import com.provectus.kafka.ui.model.rbac.AccessContext;
+import com.provectus.kafka.ui.model.rbac.permission.ClusterConfigAction;
 import com.provectus.kafka.ui.service.BrokerService;
 import com.provectus.kafka.ui.service.BrokerService;
+import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import java.util.List;
 import java.util.List;
 import lombok.RequiredArgsConstructor;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
 import lombok.extern.slf4j.Slf4j;
@@ -24,46 +27,78 @@ import reactor.core.publisher.Mono;
 public class BrokersController extends AbstractController implements BrokersApi {
 public class BrokersController extends AbstractController implements BrokersApi {
   private final BrokerService brokerService;
   private final BrokerService brokerService;
   private final ClusterMapper clusterMapper;
   private final ClusterMapper clusterMapper;
+  private final AccessControlService accessControlService;
 
 
   @Override
   @Override
-  public Mono<ResponseEntity<BrokerMetricsDTO>> getBrokersMetrics(String clusterName, Integer id,
-                                                                  ServerWebExchange exchange) {
-    return brokerService.getBrokerMetrics(getCluster(clusterName), id)
-        .map(clusterMapper::toBrokerMetrics)
-        .map(ResponseEntity::ok)
-        .onErrorReturn(ResponseEntity.notFound().build());
+  public Mono<ResponseEntity<Flux<BrokerDTO>>> getBrokers(String clusterName,
+                                                          ServerWebExchange exchange) {
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .build());
+
+    var job = brokerService.getBrokers(getCluster(clusterName)).map(clusterMapper::toBrokerDto);
+
+    return validateAccess.thenReturn(ResponseEntity.ok(job));
   }
   }
 
 
   @Override
   @Override
-  public Mono<ResponseEntity<Flux<BrokerDTO>>> getBrokers(String clusterName,
-                                                          ServerWebExchange exchange) {
-    return Mono.just(ResponseEntity.ok(brokerService.getBrokers(getCluster(clusterName))));
+  public Mono<ResponseEntity<BrokerMetricsDTO>> getBrokersMetrics(String clusterName, Integer id,
+                                                                  ServerWebExchange exchange) {
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .build());
+
+    return validateAccess.then(
+        brokerService.getBrokerMetrics(getCluster(clusterName), id)
+            .map(clusterMapper::toBrokerMetrics)
+            .map(ResponseEntity::ok)
+            .onErrorReturn(ResponseEntity.notFound().build())
+    );
   }
   }
 
 
   @Override
   @Override
   public Mono<ResponseEntity<Flux<BrokersLogdirsDTO>>> getAllBrokersLogdirs(String clusterName,
   public Mono<ResponseEntity<Flux<BrokersLogdirsDTO>>> getAllBrokersLogdirs(String clusterName,
                                                                             List<Integer> brokers,
                                                                             List<Integer> brokers,
-                                                                            ServerWebExchange exchange
-  ) {
-    return Mono.just(ResponseEntity.ok(
+                                                                            ServerWebExchange exchange) {
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .build());
+
+    return validateAccess.thenReturn(ResponseEntity.ok(
         brokerService.getAllBrokersLogdirs(getCluster(clusterName), brokers)));
         brokerService.getAllBrokersLogdirs(getCluster(clusterName), brokers)));
   }
   }
 
 
   @Override
   @Override
-  public Mono<ResponseEntity<Flux<BrokerConfigDTO>>> getBrokerConfig(String clusterName, Integer id,
+  public Mono<ResponseEntity<Flux<BrokerConfigDTO>>> getBrokerConfig(String clusterName,
+                                                                     Integer id,
                                                                      ServerWebExchange exchange) {
                                                                      ServerWebExchange exchange) {
-    return Mono.just(ResponseEntity.ok(
-        brokerService.getBrokerConfig(getCluster(clusterName), id)
-            .map(clusterMapper::toBrokerConfig)));
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .clusterConfigActions(ClusterConfigAction.VIEW)
+        .build());
+
+    return validateAccess.thenReturn(
+        ResponseEntity.ok(
+            brokerService.getBrokerConfig(getCluster(clusterName), id)
+                .map(clusterMapper::toBrokerConfig))
+    );
   }
   }
 
 
   @Override
   @Override
-  public Mono<ResponseEntity<Void>> updateBrokerTopicPartitionLogDir(
-      String clusterName, Integer id, Mono<BrokerLogdirUpdateDTO> brokerLogdir,
-      ServerWebExchange exchange) {
-    return brokerLogdir
-        .flatMap(bld -> brokerService.updateBrokerLogDir(getCluster(clusterName), id, bld))
-        .map(ResponseEntity::ok);
+  public Mono<ResponseEntity<Void>> updateBrokerTopicPartitionLogDir(String clusterName,
+                                                                     Integer id,
+                                                                     Mono<BrokerLogdirUpdateDTO> brokerLogdir,
+                                                                     ServerWebExchange exchange) {
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .clusterConfigActions(ClusterConfigAction.VIEW, ClusterConfigAction.EDIT)
+        .build());
+
+    return validateAccess.then(
+        brokerLogdir
+            .flatMap(bld -> brokerService.updateBrokerLogDir(getCluster(clusterName), id, bld))
+            .map(ResponseEntity::ok)
+    );
   }
   }
 
 
   @Override
   @Override
@@ -72,9 +107,16 @@ public class BrokersController extends AbstractController implements BrokersApi
                                                              String name,
                                                              String name,
                                                              Mono<BrokerConfigItemDTO> brokerConfig,
                                                              Mono<BrokerConfigItemDTO> brokerConfig,
                                                              ServerWebExchange exchange) {
                                                              ServerWebExchange exchange) {
-    return brokerConfig
-        .flatMap(bci -> brokerService.updateBrokerConfigByName(
-            getCluster(clusterName), id, name, bci.getValue()))
-        .map(ResponseEntity::ok);
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .clusterConfigActions(ClusterConfigAction.VIEW, ClusterConfigAction.EDIT)
+        .build());
+
+    return validateAccess.then(
+        brokerConfig
+            .flatMap(bci -> brokerService.updateBrokerConfigByName(
+                getCluster(clusterName), id, name, bci.getValue()))
+            .map(ResponseEntity::ok)
+    );
   }
   }
 }
 }

+ 39 - 11
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ClustersController.java

@@ -4,7 +4,9 @@ import com.provectus.kafka.ui.api.ClustersApi;
 import com.provectus.kafka.ui.model.ClusterDTO;
 import com.provectus.kafka.ui.model.ClusterDTO;
 import com.provectus.kafka.ui.model.ClusterMetricsDTO;
 import com.provectus.kafka.ui.model.ClusterMetricsDTO;
 import com.provectus.kafka.ui.model.ClusterStatsDTO;
 import com.provectus.kafka.ui.model.ClusterStatsDTO;
+import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.service.ClusterService;
 import com.provectus.kafka.ui.service.ClusterService;
+import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import lombok.RequiredArgsConstructor;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
 import lombok.extern.slf4j.Slf4j;
 import org.springframework.http.ResponseEntity;
 import org.springframework.http.ResponseEntity;
@@ -18,31 +20,57 @@ import reactor.core.publisher.Mono;
 @Slf4j
 @Slf4j
 public class ClustersController extends AbstractController implements ClustersApi {
 public class ClustersController extends AbstractController implements ClustersApi {
   private final ClusterService clusterService;
   private final ClusterService clusterService;
+  private final AccessControlService accessControlService;
+
+  @Override
+  public Mono<ResponseEntity<Flux<ClusterDTO>>> getClusters(ServerWebExchange exchange) {
+    Flux<ClusterDTO> job = Flux.fromIterable(clusterService.getClusters())
+        .filterWhen(accessControlService::isClusterAccessible);
+
+    return Mono.just(ResponseEntity.ok(job));
+  }
 
 
   @Override
   @Override
   public Mono<ResponseEntity<ClusterMetricsDTO>> getClusterMetrics(String clusterName,
   public Mono<ResponseEntity<ClusterMetricsDTO>> getClusterMetrics(String clusterName,
                                                                    ServerWebExchange exchange) {
                                                                    ServerWebExchange exchange) {
-    return clusterService.getClusterMetrics(getCluster(clusterName))
-        .map(ResponseEntity::ok)
-        .onErrorReturn(ResponseEntity.notFound().build());
+    AccessContext context = AccessContext.builder()
+        .cluster(clusterName)
+        .build();
+
+    return accessControlService.validateAccess(context)
+        .then(
+            clusterService.getClusterMetrics(getCluster(clusterName))
+                .map(ResponseEntity::ok)
+                .onErrorReturn(ResponseEntity.notFound().build())
+        );
   }
   }
 
 
   @Override
   @Override
   public Mono<ResponseEntity<ClusterStatsDTO>> getClusterStats(String clusterName,
   public Mono<ResponseEntity<ClusterStatsDTO>> getClusterStats(String clusterName,
                                                                ServerWebExchange exchange) {
                                                                ServerWebExchange exchange) {
-    return clusterService.getClusterStats(getCluster(clusterName))
-        .map(ResponseEntity::ok)
-        .onErrorReturn(ResponseEntity.notFound().build());
-  }
+    AccessContext context = AccessContext.builder()
+        .cluster(clusterName)
+        .build();
 
 
-  @Override
-  public Mono<ResponseEntity<Flux<ClusterDTO>>> getClusters(ServerWebExchange exchange) {
-    return Mono.just(ResponseEntity.ok(Flux.fromIterable(clusterService.getClusters())));
+    return accessControlService.validateAccess(context)
+        .then(
+            clusterService.getClusterStats(getCluster(clusterName))
+                .map(ResponseEntity::ok)
+                .onErrorReturn(ResponseEntity.notFound().build())
+        );
   }

   @Override
   public Mono<ResponseEntity<ClusterDTO>> updateClusterInfo(String clusterName,
                                                             ServerWebExchange exchange) {
-    return clusterService.updateCluster(getCluster(clusterName)).map(ResponseEntity::ok);
+
+    AccessContext context = AccessContext.builder()
+        .cluster(clusterName)
+        .build();
+
+    return accessControlService.validateAccess(context)
+        .then(
+            clusterService.updateCluster(getCluster(clusterName)).map(ResponseEntity::ok)
+        );
   }
 }
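List endpoints (getClusters above, and the connector and schema listings further down) apply the check per element with filterWhen rather than rejecting the whole request: entries the user may not see are simply dropped. A self-contained Reactor illustration of that operator (plain Java, no kafka-ui types; the predicate stands in for the async RBAC check):

import java.util.List;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

class FilterWhenDemo {
  public static void main(String[] args) {
    List<String> clusters = List.of("prod", "staging", "sandbox");
    Flux.fromIterable(clusters)
        .filterWhen(name -> Mono.just(!"prod".equals(name)))  // pretend access check
        .collectList()
        .subscribe(System.out::println);                      // prints [staging, sandbox]
  }
}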

+ 129 - 71
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ConsumerGroupsController.java

@@ -1,5 +1,8 @@
 package com.provectus.kafka.ui.controller;
 
 
+import static com.provectus.kafka.ui.model.rbac.permission.ConsumerGroupAction.DELETE;
+import static com.provectus.kafka.ui.model.rbac.permission.ConsumerGroupAction.RESET_OFFSETS;
+import static com.provectus.kafka.ui.model.rbac.permission.ConsumerGroupAction.VIEW;
 import static java.util.stream.Collectors.toMap;

 import com.provectus.kafka.ui.api.ConsumerGroupsApi;
@@ -12,10 +15,14 @@ import com.provectus.kafka.ui.model.ConsumerGroupOrderingDTO;
 import com.provectus.kafka.ui.model.ConsumerGroupsPageResponseDTO;
 import com.provectus.kafka.ui.model.PartitionOffsetDTO;
 import com.provectus.kafka.ui.model.SortOrderDTO;
+import com.provectus.kafka.ui.model.rbac.AccessContext;
+import com.provectus.kafka.ui.model.rbac.permission.TopicAction;
 import com.provectus.kafka.ui.service.ConsumerGroupService;
 import com.provectus.kafka.ui.service.OffsetsResetService;
+import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import java.util.Map;
 import java.util.Optional;
+import java.util.function.Supplier;
 import java.util.stream.Collectors;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
@@ -34,33 +41,65 @@ public class ConsumerGroupsController extends AbstractController implements Cons
 
 
   private final ConsumerGroupService consumerGroupService;
   private final OffsetsResetService offsetsResetService;
+  private final AccessControlService accessControlService;
 
 
   @Value("${consumer.groups.page.size:25}")
   private int defaultConsumerGroupsPageSize;

   @Override
-  public Mono<ResponseEntity<Void>> deleteConsumerGroup(String clusterName, String id,
+  public Mono<ResponseEntity<Void>> deleteConsumerGroup(String clusterName,
+                                                        String id,
                                                         ServerWebExchange exchange) {
-    return consumerGroupService.deleteConsumerGroupById(getCluster(clusterName), id)
-        .thenReturn(ResponseEntity.ok().build());
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .consumerGroup(id)
+        .consumerGroupActions(DELETE)
+        .build());
+
+    return validateAccess.then(
+        consumerGroupService.deleteConsumerGroupById(getCluster(clusterName), id)
+            .thenReturn(ResponseEntity.ok().build())
+    );
   }

   @Override
-  public Mono<ResponseEntity<ConsumerGroupDetailsDTO>> getConsumerGroup(
-      String clusterName, String consumerGroupId, ServerWebExchange exchange) {
-    return consumerGroupService.getConsumerGroupDetail(getCluster(clusterName), consumerGroupId)
-        .map(ConsumerGroupMapper::toDetailsDto)
-        .map(ResponseEntity::ok);
+  public Mono<ResponseEntity<ConsumerGroupDetailsDTO>> getConsumerGroup(String clusterName,
+                                                                        String consumerGroupId,
+                                                                        ServerWebExchange exchange) {
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .consumerGroup(consumerGroupId)
+        .consumerGroupActions(VIEW)
+        .build());
+
+    return validateAccess.then(
+        consumerGroupService.getConsumerGroupDetail(getCluster(clusterName), consumerGroupId)
+            .map(ConsumerGroupMapper::toDetailsDto)
+            .map(ResponseEntity::ok)
+    );
   }

   @Override
-  public Mono<ResponseEntity<Flux<ConsumerGroupDTO>>> getTopicConsumerGroups(
-      String clusterName, String topicName, ServerWebExchange exchange) {
-    return consumerGroupService.getConsumerGroupsForTopic(getCluster(clusterName), topicName)
-        .map(Flux::fromIterable)
-        .map(f -> f.map(ConsumerGroupMapper::toDto))
-        .map(ResponseEntity::ok)
-        .switchIfEmpty(Mono.just(ResponseEntity.notFound().build()));
+  public Mono<ResponseEntity<Flux<ConsumerGroupDTO>>> getTopicConsumerGroups(String clusterName,
+                                                                             String topicName,
+                                                                             ServerWebExchange exchange) {
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .topic(topicName)
+        .topicActions(TopicAction.VIEW)
+        .build());
+
+    Mono<ResponseEntity<Flux<ConsumerGroupDTO>>> job =
+        consumerGroupService.getConsumerGroupsForTopic(getCluster(clusterName), topicName)
+            .flatMapMany(Flux::fromIterable)
+            .filterWhen(cg -> accessControlService.isConsumerGroupAccessible(cg.getGroupId(), clusterName))
+            .map(ConsumerGroupMapper::toDto)
+            .collectList()
+            .map(Flux::fromIterable)
+            .map(ResponseEntity::ok)
+            .switchIfEmpty(Mono.just(ResponseEntity.notFound().build()));
+
+    return validateAccess.then(job);
   }

   @Override
@@ -72,70 +111,89 @@ public class ConsumerGroupsController extends AbstractController implements Cons
       ConsumerGroupOrderingDTO orderBy,
       SortOrderDTO sortOrderDto,
       ServerWebExchange exchange) {
-    return consumerGroupService.getConsumerGroupsPage(
-            getCluster(clusterName),
-            Optional.ofNullable(page).filter(i -> i > 0).orElse(1),
-            Optional.ofNullable(perPage).filter(i -> i > 0).orElse(defaultConsumerGroupsPageSize),
-            search,
-            Optional.ofNullable(orderBy).orElse(ConsumerGroupOrderingDTO.NAME),
-            Optional.ofNullable(sortOrderDto).orElse(SortOrderDTO.ASC)
-        )
-        .map(this::convertPage)
-        .map(ResponseEntity::ok);
-  }
 
 
-  private ConsumerGroupsPageResponseDTO convertPage(ConsumerGroupService.ConsumerGroupsPage
-                                                        consumerGroupConsumerGroupsPage) {
-    return new ConsumerGroupsPageResponseDTO()
-        .pageCount(consumerGroupConsumerGroupsPage.getTotalPages())
-        .consumerGroups(consumerGroupConsumerGroupsPage.getConsumerGroups()
-            .stream()
-            .map(ConsumerGroupMapper::toDto)
-            .collect(Collectors.toList()));
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        // consumer group access validation is within the service
+        .build());
+
+    return validateAccess.then(
+        consumerGroupService.getConsumerGroupsPage(
+                getCluster(clusterName),
+                Optional.ofNullable(page).filter(i -> i > 0).orElse(1),
+                Optional.ofNullable(perPage).filter(i -> i > 0).orElse(defaultConsumerGroupsPageSize),
+                search,
+                Optional.ofNullable(orderBy).orElse(ConsumerGroupOrderingDTO.NAME),
+                Optional.ofNullable(sortOrderDto).orElse(SortOrderDTO.ASC)
+            )
+            .map(this::convertPage)
+            .map(ResponseEntity::ok)
+    );
   }

   @Override
-  public Mono<ResponseEntity<Void>> resetConsumerGroupOffsets(String clusterName, String group,
-                                                              Mono<ConsumerGroupOffsetsResetDTO>
-                                                                  consumerGroupOffsetsReset,
+  public Mono<ResponseEntity<Void>> resetConsumerGroupOffsets(String clusterName,
+                                                              String group,
+                                                              Mono<ConsumerGroupOffsetsResetDTO> resetDto,
                                                               ServerWebExchange exchange) {
-    return consumerGroupOffsetsReset.flatMap(reset -> {
-      var cluster = getCluster(clusterName);
-      switch (reset.getResetType()) {
-        case EARLIEST:
-          return offsetsResetService
-              .resetToEarliest(cluster, group, reset.getTopic(), reset.getPartitions());
-        case LATEST:
-          return offsetsResetService
-              .resetToLatest(cluster, group, reset.getTopic(), reset.getPartitions());
-        case TIMESTAMP:
-          if (reset.getResetToTimestamp() == null) {
-            return Mono.error(
-                new ValidationException(
-                    "resetToTimestamp is required when TIMESTAMP reset type used"
-                )
-            );
-          }
-          return offsetsResetService
-              .resetToTimestamp(cluster, group, reset.getTopic(), reset.getPartitions(),
-                  reset.getResetToTimestamp());
-        case OFFSET:
-          if (CollectionUtils.isEmpty(reset.getPartitionsOffsets())) {
+    return resetDto.flatMap(reset -> {
+      Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+          .cluster(clusterName)
+          .topic(reset.getTopic())
+          .topicActions(TopicAction.VIEW)
+          .consumerGroupActions(RESET_OFFSETS)
+          .build());
+
+      Supplier<Mono<Void>> mono = () -> {
+        var cluster = getCluster(clusterName);
+        switch (reset.getResetType()) {
+          case EARLIEST:
+            return offsetsResetService
+                .resetToEarliest(cluster, group, reset.getTopic(), reset.getPartitions());
+          case LATEST:
+            return offsetsResetService
+                .resetToLatest(cluster, group, reset.getTopic(), reset.getPartitions());
+          case TIMESTAMP:
+            if (reset.getResetToTimestamp() == null) {
+              return Mono.error(
+                  new ValidationException(
+                      "resetToTimestamp is required when TIMESTAMP reset type used"
+                  )
+              );
+            }
+            return offsetsResetService
+                .resetToTimestamp(cluster, group, reset.getTopic(), reset.getPartitions(),
+                    reset.getResetToTimestamp());
+          case OFFSET:
+            if (CollectionUtils.isEmpty(reset.getPartitionsOffsets())) {
+              return Mono.error(
+                  new ValidationException(
+                      "partitionsOffsets is required when OFFSET reset type used"
+                  )
+              );
+            }
+            Map<Integer, Long> offsets = reset.getPartitionsOffsets().stream()
+                .collect(toMap(PartitionOffsetDTO::getPartition, PartitionOffsetDTO::getOffset));
+            return offsetsResetService.resetToOffsets(cluster, group, reset.getTopic(), offsets);
+          default:
             return Mono.error(
-                new ValidationException(
-                    "partitionsOffsets is required when OFFSET reset type used"
-                )
+                new ValidationException("Unknown resetType " + reset.getResetType())
             );
             );
-          Map<Integer, Long> offsets = reset.getPartitionsOffsets().stream()
-              .collect(toMap(PartitionOffsetDTO::getPartition, PartitionOffsetDTO::getOffset));
-          return offsetsResetService.resetToOffsets(cluster, group, reset.getTopic(), offsets);
-        default:
-          return Mono.error(
-              new ValidationException("Unknown resetType " + reset.getResetType())
-          );
-      }
+        }
+      };
+
+      return validateAccess.then(mono.get());
     }).thenReturn(ResponseEntity.ok().build());
   }
 
 
+  private ConsumerGroupsPageResponseDTO convertPage(ConsumerGroupService.ConsumerGroupsPage
+                                                        consumerGroupConsumerGroupsPage) {
+    return new ConsumerGroupsPageResponseDTO()
+        .pageCount(consumerGroupConsumerGroupsPage.totalPages())
+        .consumerGroups(consumerGroupConsumerGroupsPage.consumerGroups()
+            .stream()
+            .map(ConsumerGroupMapper::toDto)
+            .collect(Collectors.toList()));
+  }
+
 }
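In resetConsumerGroupOffsets the switch over reset types is wrapped in a Supplier and only invoked after validateAccess, so the offsets-reset pipeline is kept out of the path of a request that fails the permission check. A compact sketch of the same deferral using Mono.defer (illustrative only; the arguments to resetToEarliest are placeholders):

Supplier<Mono<Void>> resetAction = () ->
    offsetsResetService.resetToEarliest(cluster, group, topic, partitions);  // placeholder args

// The diff assembles the Mono via resetAction.get() but subscribes to it only
// after validation completes; Mono.defer additionally delays assembly itself.
return validateAccess
    .then(Mono.defer(resetAction))
    .thenReturn(ResponseEntity.ok().build());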

+ 0 - 32
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/InfoController.java

@@ -1,32 +0,0 @@
-package com.provectus.kafka.ui.controller;
-
-import com.provectus.kafka.ui.api.TimeStampFormatApi;
-import com.provectus.kafka.ui.model.TimeStampFormatDTO;
-import lombok.RequiredArgsConstructor;
-import lombok.extern.slf4j.Slf4j;
-import org.springframework.beans.factory.annotation.Value;
-import org.springframework.http.ResponseEntity;
-import org.springframework.web.bind.annotation.RestController;
-import org.springframework.web.server.ServerWebExchange;
-import reactor.core.publisher.Mono;
-
-@RestController
-@RequiredArgsConstructor
-@Slf4j
-public class InfoController extends AbstractController implements TimeStampFormatApi {
-
-  @Value("${timestamp.format:dd.MM.YYYY HH:mm:ss}")
-  private String timeStampFormat;
-  @Value("${timestamp.format:DD.MM.YYYY HH:mm:ss}")
-  private String timeStampFormatIso;
-
-  @Override
-  public Mono<ResponseEntity<TimeStampFormatDTO>> getTimeStampFormat(ServerWebExchange exchange) {
-    return Mono.just(ResponseEntity.ok(new TimeStampFormatDTO().timeStampFormat(timeStampFormat)));
-  }
-
-  @Override
-  public Mono<ResponseEntity<TimeStampFormatDTO>> getTimeStampFormatISO(ServerWebExchange exchange) {
-    return Mono.just(ResponseEntity.ok(new TimeStampFormatDTO().timeStampFormat(timeStampFormatIso)));
-  }
-}

+ 132 - 34
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/KafkaConnectController.java

@@ -11,7 +11,10 @@ import com.provectus.kafka.ui.model.FullConnectorInfoDTO;
 import com.provectus.kafka.ui.model.NewConnectorDTO;
 import com.provectus.kafka.ui.model.SortOrderDTO;
 import com.provectus.kafka.ui.model.TaskDTO;
+import com.provectus.kafka.ui.model.rbac.AccessContext;
+import com.provectus.kafka.ui.model.rbac.permission.ConnectAction;
 import com.provectus.kafka.ui.service.KafkaConnectService;
+import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import java.util.Comparator;
 import java.util.Map;
 import javax.validation.Valid;
@@ -28,42 +31,83 @@ import reactor.core.publisher.Mono;
 @Slf4j
 public class KafkaConnectController extends AbstractController implements KafkaConnectApi {
   private final KafkaConnectService kafkaConnectService;
+  private final AccessControlService accessControlService;
 
 
   @Override
   public Mono<ResponseEntity<Flux<ConnectDTO>>> getConnects(String clusterName,
                                                             ServerWebExchange exchange) {
-    return kafkaConnectService.getConnects(getCluster(clusterName)).map(ResponseEntity::ok);
+
+    Flux<ConnectDTO> flux = Flux.fromIterable(kafkaConnectService.getConnects(getCluster(clusterName)))
+        .filterWhen(dto -> accessControlService.isConnectAccessible(dto, clusterName));
+
+    return Mono.just(ResponseEntity.ok(flux));
   }

   @Override
   public Mono<ResponseEntity<Flux<String>>> getConnectors(String clusterName, String connectName,
                                                           ServerWebExchange exchange) {
-    var connectors = kafkaConnectService.getConnectors(getCluster(clusterName), connectName);
-    return Mono.just(ResponseEntity.ok(connectors));
+
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .connect(connectName)
+        .connectActions(ConnectAction.VIEW)
+        .build());
+
+    return validateAccess.thenReturn(
+        ResponseEntity.ok(kafkaConnectService.getConnectors(getCluster(clusterName), connectName))
+    );
   }

   @Override
   public Mono<ResponseEntity<ConnectorDTO>> createConnector(String clusterName, String connectName,
                                                             @Valid Mono<NewConnectorDTO> connector,
                                                             ServerWebExchange exchange) {
-    return kafkaConnectService.createConnector(getCluster(clusterName), connectName, connector)
-        .map(ResponseEntity::ok);
+
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .connect(connectName)
+        .connectActions(ConnectAction.VIEW, ConnectAction.CREATE)
+        .build());
+
+    return validateAccess.then(
+        kafkaConnectService.createConnector(getCluster(clusterName), connectName, connector)
+            .map(ResponseEntity::ok)
+    );
   }

   @Override
   public Mono<ResponseEntity<ConnectorDTO>> getConnector(String clusterName, String connectName,
                                                          String connectorName,
                                                          ServerWebExchange exchange) {
-    return kafkaConnectService.getConnector(getCluster(clusterName), connectName, connectorName)
-        .map(ResponseEntity::ok);
+
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .connect(connectName)
+        .connectActions(ConnectAction.VIEW)
+        .connector(connectorName)
+        .build());
+
+    return validateAccess.then(
+        kafkaConnectService.getConnector(getCluster(clusterName), connectName, connectorName)
+            .map(ResponseEntity::ok)
+    );
   }

   @Override
   public Mono<ResponseEntity<Void>> deleteConnector(String clusterName, String connectName,
                                                     String connectorName,
                                                     ServerWebExchange exchange) {
-    return kafkaConnectService.deleteConnector(getCluster(clusterName), connectName, connectorName)
-        .map(ResponseEntity::ok);
+
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .connect(connectName)
+        .connectActions(ConnectAction.VIEW, ConnectAction.EDIT)
+        .build());
+
+    return validateAccess.then(
+        kafkaConnectService.deleteConnector(getCluster(clusterName), connectName, connectorName)
+            .map(ResponseEntity::ok)
+    );
   }


@@ -76,11 +120,13 @@ public class KafkaConnectController extends AbstractController implements KafkaC
       ServerWebExchange exchange
   ) {
     var comparator = sortOrder == null || sortOrder.equals(SortOrderDTO.ASC)
-            ? getConnectorsComparator(orderBy)
-            : getConnectorsComparator(orderBy).reversed();
-    return Mono.just(ResponseEntity.ok(
-        kafkaConnectService.getAllConnectors(getCluster(clusterName), search).sort(comparator))
-    );
+        ? getConnectorsComparator(orderBy)
+        : getConnectorsComparator(orderBy).reversed();
+    Flux<FullConnectorInfoDTO> job = kafkaConnectService.getAllConnectors(getCluster(clusterName), search)
+        .filterWhen(dto -> accessControlService.isConnectAccessible(dto.getConnect(), clusterName))
+        .filterWhen(dto -> accessControlService.isConnectorAccessible(dto.getConnect(), dto.getName(), clusterName));
+
+    return Mono.just(ResponseEntity.ok(job.sort(comparator)));
   }

   @Override
@@ -88,9 +134,18 @@ public class KafkaConnectController extends AbstractController implements KafkaC
                                                                       String connectName,
                                                                       String connectorName,
                                                                       ServerWebExchange exchange) {
-    return kafkaConnectService
-        .getConnectorConfig(getCluster(clusterName), connectName, connectorName)
-        .map(ResponseEntity::ok);
+
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .connect(connectName)
+        .connectActions(ConnectAction.VIEW)
+        .build());
+
+    return validateAccess.then(
+        kafkaConnectService
+            .getConnectorConfig(getCluster(clusterName), connectName, connectorName)
+            .map(ResponseEntity::ok)
+    );
   }

   @Override
@@ -99,9 +154,18 @@ public class KafkaConnectController extends AbstractController implements KafkaC
                                                                String connectorName,
                                                                @Valid Mono<Object> requestBody,
                                                                ServerWebExchange exchange) {
-    return kafkaConnectService
-        .setConnectorConfig(getCluster(clusterName), connectName, connectorName, requestBody)
-        .map(ResponseEntity::ok);
+
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .connect(connectName)
+        .connectActions(ConnectAction.VIEW, ConnectAction.EDIT)
+        .build());
+
+    return validateAccess.then(
+        kafkaConnectService
+            .setConnectorConfig(getCluster(clusterName), connectName, connectorName, requestBody)
+            .map(ResponseEntity::ok)
+    );
   }

   @Override
@@ -109,9 +173,18 @@ public class KafkaConnectController extends AbstractController implements KafkaC
                                                          String connectorName,
                                                          ConnectorActionDTO action,
                                                          ServerWebExchange exchange) {
-    return kafkaConnectService
-        .updateConnectorState(getCluster(clusterName), connectName, connectorName, action)
-        .map(ResponseEntity::ok);
+
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .connect(connectName)
+        .connectActions(ConnectAction.VIEW, ConnectAction.EDIT)
+        .build());
+
+    return validateAccess.then(
+        kafkaConnectService
+            .updateConnectorState(getCluster(clusterName), connectName, connectorName, action)
+            .map(ResponseEntity::ok)
+    );
   }

   @Override
@@ -119,31 +192,56 @@ public class KafkaConnectController extends AbstractController implements KafkaC
                                                                String connectName,
                                                                String connectorName,
                                                                ServerWebExchange exchange) {
-    return Mono.just(ResponseEntity
-        .ok(kafkaConnectService
-            .getConnectorTasks(getCluster(clusterName), connectName, connectorName)));
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .connect(connectName)
+        .connectActions(ConnectAction.VIEW)
+        .build());
+
+    return validateAccess.thenReturn(
+        ResponseEntity
+            .ok(kafkaConnectService
+                .getConnectorTasks(getCluster(clusterName), connectName, connectorName))
+    );
   }

   @Override
   public Mono<ResponseEntity<Void>> restartConnectorTask(String clusterName, String connectName,
                                                          String connectorName, Integer taskId,
                                                          ServerWebExchange exchange) {
-    return kafkaConnectService
-        .restartConnectorTask(getCluster(clusterName), connectName, connectorName, taskId)
-        .map(ResponseEntity::ok);
+
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .connect(connectName)
+        .connectActions(ConnectAction.VIEW, ConnectAction.EDIT)
+        .build());
+
+    return validateAccess.then(
+        kafkaConnectService
+            .restartConnectorTask(getCluster(clusterName), connectName, connectorName, taskId)
+            .map(ResponseEntity::ok)
+    );
   }

   @Override
   public Mono<ResponseEntity<Flux<ConnectorPluginDTO>>> getConnectorPlugins(
       String clusterName, String connectName, ServerWebExchange exchange) {
-    return kafkaConnectService
-        .getConnectorPlugins(getCluster(clusterName), connectName)
-        .map(ResponseEntity::ok);
+
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .connect(connectName)
+        .connectActions(ConnectAction.VIEW)
+        .build());
+
+    return validateAccess.then(
+        Mono.just(
+            ResponseEntity.ok(
+                kafkaConnectService.getConnectorPlugins(getCluster(clusterName), connectName)))
+    );
   }

   @Override
-  public Mono<ResponseEntity<ConnectorPluginConfigValidationResponseDTO>>
-      validateConnectorPluginConfig(
+  public Mono<ResponseEntity<ConnectorPluginConfigValidationResponseDTO>> validateConnectorPluginConfig(
       String clusterName, String connectName, String pluginName, @Valid Mono<Object> requestBody,
       ServerWebExchange exchange) {
     return kafkaConnectService
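getAllConnectors chains two filterWhen checks, one per Connect cluster and one per connector, so an entry is returned only when both asynchronous checks pass. A tiny stand-alone illustration of how chained filterWhen composes as a logical AND (stand-in predicates, not the real RBAC calls):

Flux.just("sink-a", "sink-b", "source-c")
    .filterWhen(name -> Mono.just(true))                     // stands in for isConnectAccessible
    .filterWhen(name -> Mono.just(name.startsWith("sink")))  // stands in for isConnectorAccessible
    .subscribe(System.out::println);                         // prints sink-a, sink-b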

+ 39 - 13
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/KsqlController.java

@@ -7,7 +7,10 @@ import com.provectus.kafka.ui.model.KsqlResponseDTO;
 import com.provectus.kafka.ui.model.KsqlStreamDescriptionDTO;
 import com.provectus.kafka.ui.model.KsqlTableDescriptionDTO;
 import com.provectus.kafka.ui.model.KsqlTableResponseDTO;
+import com.provectus.kafka.ui.model.rbac.AccessContext;
+import com.provectus.kafka.ui.model.rbac.permission.KsqlAction;
 import com.provectus.kafka.ui.service.ksql.KsqlServiceV2;
+import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
@@ -19,51 +22,74 @@ import org.springframework.web.server.ServerWebExchange;
 import reactor.core.publisher.Flux;
 import reactor.core.publisher.Mono;

-
 @RestController
 @RequiredArgsConstructor
 @Slf4j
 public class KsqlController extends AbstractController implements KsqlApi {

   private final KsqlServiceV2 ksqlServiceV2;
+  private final AccessControlService accessControlService;
 
 
   @Override
   public Mono<ResponseEntity<KsqlCommandV2ResponseDTO>> executeKsql(String clusterName,
                                                                     Mono<KsqlCommandV2DTO>
                                                                         ksqlCommand2Dto,
                                                                     ServerWebExchange exchange) {
-    return ksqlCommand2Dto.map(dto -> {
-      var id = ksqlServiceV2.registerCommand(
-          getCluster(clusterName),
-          dto.getKsql(),
-          Optional.ofNullable(dto.getStreamsProperties()).orElse(Map.of()));
-      return new KsqlCommandV2ResponseDTO().pipeId(id);
-    }).map(ResponseEntity::ok);
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .ksqlActions(KsqlAction.EXECUTE)
+        .build());
+
+    return validateAccess.then(
+        ksqlCommand2Dto.map(dto -> {
+          var id = ksqlServiceV2.registerCommand(
+              getCluster(clusterName),
+              dto.getKsql(),
+              Optional.ofNullable(dto.getStreamsProperties()).orElse(Map.of()));
+          return new KsqlCommandV2ResponseDTO().pipeId(id);
+        }).map(ResponseEntity::ok)
+    );
   }

   @Override
   public Mono<ResponseEntity<Flux<KsqlResponseDTO>>> openKsqlResponsePipe(String clusterName,
                                                                           String pipeId,
                                                                           ServerWebExchange exchange) {
-    return Mono.just(
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .ksqlActions(KsqlAction.EXECUTE)
+        .build());
+
+    return validateAccess.thenReturn(
         ResponseEntity.ok(ksqlServiceV2.execute(pipeId)
             .map(table -> new KsqlResponseDTO()
                 .table(
                     new KsqlTableResponseDTO()
                         .header(table.getHeader())
                         .columnNames(table.getColumnNames())
-                        .values((List<List<Object>>) ((List<?>) (table.getValues())))))));
+                        .values((List<List<Object>>) ((List<?>) (table.getValues()))))))
+    );
   }

   @Override
   public Mono<ResponseEntity<Flux<KsqlStreamDescriptionDTO>>> listStreams(String clusterName,
-                                                                         ServerWebExchange exchange) {
-    return Mono.just(ResponseEntity.ok(ksqlServiceV2.listStreams(getCluster(clusterName))));
+                                                                          ServerWebExchange exchange) {
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .ksqlActions(KsqlAction.EXECUTE)
+        .build());
+
+    return validateAccess.thenReturn(ResponseEntity.ok(ksqlServiceV2.listStreams(getCluster(clusterName))));
   }

   @Override
   public Mono<ResponseEntity<Flux<KsqlTableDescriptionDTO>>> listTables(String clusterName,
                                                                         ServerWebExchange exchange) {
-    return Mono.just(ResponseEntity.ok(ksqlServiceV2.listTables(getCluster(clusterName))));
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .ksqlActions(KsqlAction.EXECUTE)
+        .build());
+
+    return validateAccess.thenReturn(ResponseEntity.ok(ksqlServiceV2.listTables(getCluster(clusterName))));
   }
 }
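The KSQL endpoints show the two composition styles used throughout this change: validateAccess.then(pipeline) subscribes to the wrapped pipeline only after the check completes, while validateAccess.thenReturn(value) emits an already-built value once it completes. A small Reactor-only sketch of the difference (guard stands in for a successful validateAccess):

Mono<Void> guard = Mono.empty();  // stands in for validateAccess succeeding

Mono<String> viaThen = guard.then(Mono.fromSupplier(() -> "computed after the guard"));
Mono<String> viaThenReturn = guard.thenReturn("computed up front, emitted after the guard");

viaThen.subscribe(System.out::println);
viaThenReturn.subscribe(System.out::println);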

+ 63 - 18
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java

@@ -1,5 +1,8 @@
 package com.provectus.kafka.ui.controller;
 
 
+import static com.provectus.kafka.ui.model.rbac.permission.TopicAction.MESSAGES_DELETE;
+import static com.provectus.kafka.ui.model.rbac.permission.TopicAction.MESSAGES_PRODUCE;
+import static com.provectus.kafka.ui.model.rbac.permission.TopicAction.MESSAGES_READ;
 import static com.provectus.kafka.ui.serde.api.Serde.Target.KEY;
 import static com.provectus.kafka.ui.serde.api.Serde.Target.VALUE;
 import static java.util.stream.Collectors.toMap;
@@ -14,8 +17,11 @@ import com.provectus.kafka.ui.model.SeekTypeDTO;
 import com.provectus.kafka.ui.model.SerdeUsageDTO;
 import com.provectus.kafka.ui.model.TopicMessageEventDTO;
 import com.provectus.kafka.ui.model.TopicSerdeSuggestionDTO;
+import com.provectus.kafka.ui.model.rbac.AccessContext;
+import com.provectus.kafka.ui.model.rbac.permission.TopicAction;
 import com.provectus.kafka.ui.service.DeserializationService;
 import com.provectus.kafka.ui.service.MessagesService;
+import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
@@ -30,6 +36,7 @@ import org.springframework.web.bind.annotation.RestController;
 import org.springframework.web.server.ServerWebExchange;
 import reactor.core.publisher.Flux;
 import reactor.core.publisher.Mono;
+import reactor.core.scheduler.Schedulers;

 @RestController
 @RequiredArgsConstructor
@@ -41,16 +48,26 @@ public class MessagesController extends AbstractController implements MessagesAp
 
 
   private final MessagesService messagesService;
   private final DeserializationService deserializationService;
+  private final AccessControlService accessControlService;
 
 
   @Override
   public Mono<ResponseEntity<Void>> deleteTopicMessages(
       String clusterName, String topicName, @Valid List<Integer> partitions,
       ServerWebExchange exchange) {
-    return messagesService.deleteTopicMessages(
-        getCluster(clusterName),
-        topicName,
-        Optional.ofNullable(partitions).orElse(List.of())
-    ).thenReturn(ResponseEntity.ok().build());
+
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .topic(topicName)
+        .topicActions(MESSAGES_DELETE)
+        .build());
+
+    return validateAccess.then(
+        messagesService.deleteTopicMessages(
+            getCluster(clusterName),
+            topicName,
+            Optional.ofNullable(partitions).orElse(List.of())
+        ).thenReturn(ResponseEntity.ok().build())
+    );
   }

   @Override
@@ -65,6 +82,12 @@ public class MessagesController extends AbstractController implements MessagesAp
                                                                            String keySerde,
                                                                            String valueSerde,
                                                                            ServerWebExchange exchange) {
+    final Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .topic(topicName)
+        .topicActions(MESSAGES_READ)
+        .build());
+
     seekType = seekType != null ? seekType : SeekTypeDTO.BEGINNING;
     seekDirection = seekDirection != null ? seekDirection : SeekDirectionDTO.FORWARD;
     filterQueryType = filterQueryType != null ? filterQueryType : MessageFilterTypeDTO.STRING_CONTAINS;
@@ -76,22 +99,33 @@ public class MessagesController extends AbstractController implements MessagesAp
         topicName,
         parseSeekTo(topicName, seekType, seekTo)
     );
-    return Mono.just(
+    Mono<ResponseEntity<Flux<TopicMessageEventDTO>>> job = Mono.just(
         ResponseEntity.ok(
             messagesService.loadMessages(
                 getCluster(clusterName), topicName, positions, q, filterQueryType,
                 recordsLimit, seekDirection, keySerde, valueSerde)
         )
     );
+
+    return validateAccess.then(job);
   }

   @Override
   public Mono<ResponseEntity<Void>> sendTopicMessages(
       String clusterName, String topicName, @Valid Mono<CreateTopicMessageDTO> createTopicMessage,
       ServerWebExchange exchange) {
-    return createTopicMessage.flatMap(msg ->
-        messagesService.sendMessage(getCluster(clusterName), topicName, msg).then()
-    ).map(ResponseEntity::ok);
+
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .topic(topicName)
+        .topicActions(MESSAGES_PRODUCE)
+        .build());
+
+    return validateAccess.then(
+        createTopicMessage.flatMap(msg ->
+            messagesService.sendMessage(getCluster(clusterName), topicName, msg).then()
+        ).map(ResponseEntity::ok)
+    );
   }

   /**
@@ -127,14 +161,25 @@ public class MessagesController extends AbstractController implements MessagesAp
                                                                  String topicName,
                                                                  SerdeUsageDTO use,
                                                                  ServerWebExchange exchange) {
-    return Mono.just(
-        new TopicSerdeSuggestionDTO()
-            .key(use == SerdeUsageDTO.SERIALIZE
-                ? deserializationService.getSerdesForSerialize(getCluster(clusterName), topicName, KEY)
-                : deserializationService.getSerdesForDeserialize(getCluster(clusterName), topicName, KEY))
-            .value(use == SerdeUsageDTO.SERIALIZE
-                ? deserializationService.getSerdesForSerialize(getCluster(clusterName), topicName, VALUE)
-                : deserializationService.getSerdesForDeserialize(getCluster(clusterName), topicName, VALUE))
-    ).map(ResponseEntity::ok);
+
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .topic(topicName)
+        .topicActions(TopicAction.VIEW)
+        .build());
+
+    TopicSerdeSuggestionDTO dto = new TopicSerdeSuggestionDTO()
+        .key(use == SerdeUsageDTO.SERIALIZE
+            ? deserializationService.getSerdesForSerialize(getCluster(clusterName), topicName, KEY)
+            : deserializationService.getSerdesForDeserialize(getCluster(clusterName), topicName, KEY))
+        .value(use == SerdeUsageDTO.SERIALIZE
+            ? deserializationService.getSerdesForSerialize(getCluster(clusterName), topicName, VALUE)
+            : deserializationService.getSerdesForDeserialize(getCluster(clusterName), topicName, VALUE));
+
+    return validateAccess.then(
+        Mono.just(dto)
+            .subscribeOn(Schedulers.boundedElastic())
+            .map(ResponseEntity::ok)
+    );
   }
 }
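getSerdes now subscribes on Schedulers.boundedElastic(), the Reactor scheduler intended for work that may block, which keeps such work off the event-loop threads; the diff itself does not say what might block here. The general idiom, for reference (expensiveLookup is a placeholder, not a method from this codebase):

Mono<String> result = Mono.fromCallable(() -> expensiveLookup())   // potentially blocking call
    .subscribeOn(Schedulers.boundedElastic())                      // run it on the elastic pool
    .map(String::toUpperCase);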

+ 146 - 43
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/SchemasController.java

@@ -2,15 +2,18 @@ package com.provectus.kafka.ui.controller;
 
 
 import com.provectus.kafka.ui.api.SchemasApi;
 import com.provectus.kafka.ui.exception.ValidationException;
-import com.provectus.kafka.ui.mapper.ClusterMapper;
+import com.provectus.kafka.ui.mapper.KafkaSrMapper;
+import com.provectus.kafka.ui.mapper.KafkaSrMapperImpl;
 import com.provectus.kafka.ui.model.CompatibilityCheckResponseDTO;
 import com.provectus.kafka.ui.model.CompatibilityLevelDTO;
 import com.provectus.kafka.ui.model.KafkaCluster;
 import com.provectus.kafka.ui.model.NewSchemaSubjectDTO;
 import com.provectus.kafka.ui.model.SchemaSubjectDTO;
 import com.provectus.kafka.ui.model.SchemaSubjectsResponseDTO;
+import com.provectus.kafka.ui.model.rbac.AccessContext;
+import com.provectus.kafka.ui.model.rbac.permission.SchemaAction;
 import com.provectus.kafka.ui.service.SchemaRegistryService;
-import java.util.Arrays;
+import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import java.util.List;
 import java.util.stream.Collectors;
 import javax.validation.Valid;
@@ -30,14 +33,15 @@ public class SchemasController extends AbstractController implements SchemasApi
 
 
   private static final Integer DEFAULT_PAGE_SIZE = 25;
 
 
-  private final ClusterMapper mapper;
+  private final KafkaSrMapper kafkaSrMapper = new KafkaSrMapperImpl();
 
 
   private final SchemaRegistryService schemaRegistryService;
+  private final AccessControlService accessControlService;
 
 
   @Override
   protected KafkaCluster getCluster(String clusterName) {
     var c = super.getCluster(clusterName);
-    if (c.getSchemaRegistry() == null) {
+    if (c.getSchemaRegistryClient() == null) {
       throw new ValidationException("Schema Registry is not set for cluster " + clusterName);
     }
     return c;
@@ -45,74 +49,148 @@ public class SchemasController extends AbstractController implements SchemasApi
 
 
   @Override
   public Mono<ResponseEntity<CompatibilityCheckResponseDTO>> checkSchemaCompatibility(
-      String clusterName, String subject, @Valid Mono<NewSchemaSubjectDTO> newSchemaSubject,
+      String clusterName, String subject, @Valid Mono<NewSchemaSubjectDTO> newSchemaSubjectMono,
       ServerWebExchange exchange) {
-    return schemaRegistryService.checksSchemaCompatibility(
-            getCluster(clusterName), subject, newSchemaSubject)
-        .map(mapper::toCompatibilityCheckResponse)
-        .map(ResponseEntity::ok);
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .schema(subject)
+        .schemaActions(SchemaAction.VIEW)
+        .build());
+
+    return validateAccess.then(
+        newSchemaSubjectMono.flatMap(subjectDTO ->
+                schemaRegistryService.checksSchemaCompatibility(
+                    getCluster(clusterName),
+                    subject,
+                    kafkaSrMapper.fromDto(subjectDTO)
+                ))
+            .map(kafkaSrMapper::toDto)
+            .map(ResponseEntity::ok)
+    );
   }

   @Override
   public Mono<ResponseEntity<SchemaSubjectDTO>> createNewSchema(
-      String clusterName, @Valid Mono<NewSchemaSubjectDTO> newSchemaSubject,
+      String clusterName, @Valid Mono<NewSchemaSubjectDTO> newSchemaSubjectMono,
       ServerWebExchange exchange) {
-    return schemaRegistryService
-        .registerNewSchema(getCluster(clusterName), newSchemaSubject)
-        .map(ResponseEntity::ok);
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .schemaActions(SchemaAction.CREATE)
+        .build());
+
+    return validateAccess.then(
+        newSchemaSubjectMono.flatMap(newSubject ->
+                schemaRegistryService.registerNewSchema(
+                    getCluster(clusterName),
+                    newSubject.getSubject(),
+                    kafkaSrMapper.fromDto(newSubject)
+                )
+            ).map(kafkaSrMapper::toDto)
+            .map(ResponseEntity::ok)
+    );
   }

   @Override
   public Mono<ResponseEntity<Void>> deleteLatestSchema(
       String clusterName, String subject, ServerWebExchange exchange) {
-    return schemaRegistryService.deleteLatestSchemaSubject(getCluster(clusterName), subject)
-        .thenReturn(ResponseEntity.ok().build());
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .schema(subject)
+        .schemaActions(SchemaAction.DELETE)
+        .build());
+
+    return validateAccess.then(
+        schemaRegistryService.deleteLatestSchemaSubject(getCluster(clusterName), subject)
+            .thenReturn(ResponseEntity.ok().build())
+    );
   }

   @Override
   public Mono<ResponseEntity<Void>> deleteSchema(
-      String clusterName, String subjectName, ServerWebExchange exchange) {
-    return schemaRegistryService.deleteSchemaSubjectEntirely(getCluster(clusterName), subjectName)
-        .thenReturn(ResponseEntity.ok().build());
+      String clusterName, String subject, ServerWebExchange exchange) {
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .schema(subject)
+        .schemaActions(SchemaAction.DELETE)
+        .build());
+
+    return validateAccess.then(
+        schemaRegistryService.deleteSchemaSubjectEntirely(getCluster(clusterName), subject)
+            .thenReturn(ResponseEntity.ok().build())
+    );
   }

   @Override
   public Mono<ResponseEntity<Void>> deleteSchemaByVersion(
       String clusterName, String subjectName, Integer version, ServerWebExchange exchange) {
-    return schemaRegistryService.deleteSchemaSubjectByVersion(getCluster(clusterName), subjectName, version)
-        .thenReturn(ResponseEntity.ok().build());
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .schema(subjectName)
+        .schemaActions(SchemaAction.DELETE)
+        .build());
+
+    return validateAccess.then(
+        schemaRegistryService.deleteSchemaSubjectByVersion(getCluster(clusterName), subjectName, version)
+            .thenReturn(ResponseEntity.ok().build())
+    );
   }

   @Override
   public Mono<ResponseEntity<Flux<SchemaSubjectDTO>>> getAllVersionsBySubject(
       String clusterName, String subjectName, ServerWebExchange exchange) {
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .schema(subjectName)
+        .schemaActions(SchemaAction.VIEW)
+        .build());
+
     Flux<SchemaSubjectDTO> schemas =
-        schemaRegistryService.getAllVersionsBySubject(getCluster(clusterName), subjectName);
-    return Mono.just(ResponseEntity.ok(schemas));
+        schemaRegistryService.getAllVersionsBySubject(getCluster(clusterName), subjectName)
+            .map(kafkaSrMapper::toDto);
+    return validateAccess.thenReturn(ResponseEntity.ok(schemas));
   }

   @Override
   public Mono<ResponseEntity<CompatibilityLevelDTO>> getGlobalSchemaCompatibilityLevel(
       String clusterName, ServerWebExchange exchange) {
     return schemaRegistryService.getGlobalSchemaCompatibilityLevel(getCluster(clusterName))
-        .map(mapper::toCompatibilityLevelDto)
+        .map(c -> new CompatibilityLevelDTO().compatibility(kafkaSrMapper.toDto(c)))
         .map(ResponseEntity::ok)
         .defaultIfEmpty(ResponseEntity.notFound().build());
   }

   @Override
-  public Mono<ResponseEntity<SchemaSubjectDTO>> getLatestSchema(String clusterName, String subject,
+  public Mono<ResponseEntity<SchemaSubjectDTO>> getLatestSchema(String clusterName,
+                                                                String subject,
                                                                 ServerWebExchange exchange) {
-    return schemaRegistryService.getLatestSchemaVersionBySubject(getCluster(clusterName), subject)
-        .map(ResponseEntity::ok);
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .schema(subject)
+        .schemaActions(SchemaAction.VIEW)
+        .build());
+
+    return validateAccess.then(
+        schemaRegistryService.getLatestSchemaVersionBySubject(getCluster(clusterName), subject)
+            .map(kafkaSrMapper::toDto)
+            .map(ResponseEntity::ok)
+    );
   }

   @Override
   public Mono<ResponseEntity<SchemaSubjectDTO>> getSchemaByVersion(
       String clusterName, String subject, Integer version, ServerWebExchange exchange) {
-    return schemaRegistryService.getSchemaSubjectByVersion(
-            getCluster(clusterName), subject, version)
-        .map(ResponseEntity::ok);
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .schema(subject)
+        .schemaActions(SchemaAction.VIEW)
+        .build());
+
+    return validateAccess.then(
+        schemaRegistryService.getSchemaSubjectByVersion(
+                getCluster(clusterName), subject, version)
+            .map(kafkaSrMapper::toDto)
+            .map(ResponseEntity::ok)
+    );
   }

   @Override
@@ -123,13 +201,16 @@ public class SchemasController extends AbstractController implements SchemasApi
                                                                     ServerWebExchange serverWebExchange) {
     return schemaRegistryService
         .getAllSubjectNames(getCluster(clusterName))
+        .flatMapIterable(l -> l)
+        .filterWhen(schema -> accessControlService.isSchemaAccessible(schema, clusterName))
+        .collectList()
         .flatMap(subjects -> {
           int pageSize = perPage != null && perPage > 0 ? perPage : DEFAULT_PAGE_SIZE;
           int subjectToSkip = ((pageNum != null && pageNum > 0 ? pageNum : 1) - 1) * pageSize;
-          List<String> filteredSubjects = Arrays.stream(subjects)
+          List<String> filteredSubjects = subjects
+              .stream()
               .filter(subj -> search == null || StringUtils.containsIgnoreCase(subj, search))
-              .sorted()
-              .collect(Collectors.toList());
+              .sorted().toList();
           var totalPages = (filteredSubjects.size() / pageSize)
               + (filteredSubjects.size() % pageSize == 0 ? 0 : 1);
           List<String> subjectsToRender = filteredSubjects.stream()
@@ -137,27 +218,49 @@ public class SchemasController extends AbstractController implements SchemasApi
               .limit(pageSize)
               .collect(Collectors.toList());
           return schemaRegistryService.getAllLatestVersionSchemas(getCluster(clusterName), subjectsToRender)
-              .map(a -> new SchemaSubjectsResponseDTO().pageCount(totalPages).schemas(a));
+              .map(subjs -> subjs.stream().map(kafkaSrMapper::toDto).toList())
+              .map(subjs -> new SchemaSubjectsResponseDTO().pageCount(totalPages).schemas(subjs));
         }).map(ResponseEntity::ok);
   }

   @Override
   public Mono<ResponseEntity<Void>> updateGlobalSchemaCompatibilityLevel(
-      String clusterName, @Valid Mono<CompatibilityLevelDTO> compatibilityLevel,
+      String clusterName, @Valid Mono<CompatibilityLevelDTO> compatibilityLevelMono,
       ServerWebExchange exchange) {
-    log.info("Updating schema compatibility globally");
-    return schemaRegistryService.updateSchemaCompatibility(
-            getCluster(clusterName), compatibilityLevel)
-        .map(ResponseEntity::ok);
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .schemaActions(SchemaAction.MODIFY_GLOBAL_COMPATIBILITY)
+        .build());
+
+    return validateAccess.then(
+        compatibilityLevelMono
+            .flatMap(compatibilityLevelDTO ->
+                schemaRegistryService.updateGlobalSchemaCompatibility(
+                    getCluster(clusterName),
+                    kafkaSrMapper.fromDto(compatibilityLevelDTO.getCompatibility())
+                ))
+            .thenReturn(ResponseEntity.ok().build())
+    );
   }

   @Override
   public Mono<ResponseEntity<Void>> updateSchemaCompatibilityLevel(
-      String clusterName, String subject, @Valid Mono<CompatibilityLevelDTO> compatibilityLevel,
+      String clusterName, String subject, @Valid Mono<CompatibilityLevelDTO> compatibilityLevelMono,
       ServerWebExchange exchange) {
-    log.info("Updating schema compatibility for subject: {}", subject);
-    return schemaRegistryService.updateSchemaCompatibility(
-            getCluster(clusterName), subject, compatibilityLevel)
-        .map(ResponseEntity::ok);
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .schemaActions(SchemaAction.EDIT)
+        .build());
+
+    return validateAccess.then(
+        compatibilityLevelMono
+            .flatMap(compatibilityLevelDTO ->
+                schemaRegistryService.updateSchemaCompatibility(
+                    getCluster(clusterName),
+                    subject,
+                    kafkaSrMapper.fromDto(compatibilityLevelDTO.getCompatibility())
+                ))
+            .thenReturn(ResponseEntity.ok().build())
+    );
   }
 }
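
Note on the SchemasController changes above: every rewritten endpoint now follows the same shape - build an AccessContext with the cluster, subject and required SchemaAction, validate it, and only then invoke SchemaRegistryService. A hedged sketch of a helper that could factor out that repetition is shown below; the helper is hypothetical (not part of this commit) and assumes validateAccess() completes empty on success and signals an error otherwise.

    // Hypothetical helper (not in this commit) showing the shared shape of the new endpoints:
    // validate an AccessContext first, then run the actual Schema Registry call.
    private <T> Mono<ResponseEntity<T>> withSchemaAccess(String clusterName,
                                                         String subject,
                                                         SchemaAction action,
                                                         Mono<ResponseEntity<T>> operation) {
      Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
          .cluster(clusterName)
          .schema(subject)
          .schemaActions(action)
          .build());
      return validateAccess.then(operation);  // operation is only subscribed after validation succeeds
    }

With such a helper, each delete/get endpoint above would reduce to a single withSchemaAccess(...) call wrapping its existing service invocation.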

+ 2 - 4
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/StaticController.java

@@ -53,9 +53,7 @@ public class StaticController {
   @SneakyThrows
   private String buildFile(Resource file, String contextPath) {
     return ResourceUtil.readAsString(file)
-        .replace("\"/assets/", "\"" + contextPath + "/assets/")
-        .replace("\"/favicon/", "\"" + contextPath + "/favicon/")
-        .replace("/manifest.json", contextPath + "/manifest.json")
-        .replace("window.basePath = ''", "window.basePath=\"" + contextPath + "\"");
+        .replace("\"assets/", "\"" + contextPath + "/assets/")
+        .replace("PUBLIC-PATH-VARIABLE",  contextPath);
   }
 }
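
The rewritten buildFile swaps three hard-coded absolute-path replacements for one relative-asset rewrite plus a PUBLIC-PATH-VARIABLE placeholder. A small illustration of the transformation; the contextPath value and HTML fragment are made up:

    // Illustration only - made-up input showing what the two replace() calls produce.
    String contextPath = "/kafka-ui";                              // hypothetical servlet context path
    String index = "<script src=\"assets/app.js\"></script>"
        + "<script>window.basePath = 'PUBLIC-PATH-VARIABLE';</script>";
    String built = index
        .replace("\"assets/", "\"" + contextPath + "/assets/")     // -> "/kafka-ui/assets/app.js"
        .replace("PUBLIC-PATH-VARIABLE", contextPath);             // -> window.basePath = '/kafka-ui'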

+ 179 - 65
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/TopicsController.java

@@ -1,5 +1,10 @@
 package com.provectus.kafka.ui.controller;

+import static com.provectus.kafka.ui.model.rbac.permission.TopicAction.CREATE;
+import static com.provectus.kafka.ui.model.rbac.permission.TopicAction.DELETE;
+import static com.provectus.kafka.ui.model.rbac.permission.TopicAction.EDIT;
+import static com.provectus.kafka.ui.model.rbac.permission.TopicAction.MESSAGES_READ;
+import static com.provectus.kafka.ui.model.rbac.permission.TopicAction.VIEW;
 import static java.util.stream.Collectors.toList;

 import com.provectus.kafka.ui.api.TopicsApi;
@@ -19,8 +24,10 @@ import com.provectus.kafka.ui.model.TopicDTO;
 import com.provectus.kafka.ui.model.TopicDetailsDTO;
 import com.provectus.kafka.ui.model.TopicUpdateDTO;
 import com.provectus.kafka.ui.model.TopicsResponseDTO;
+import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.service.TopicsService;
 import com.provectus.kafka.ui.service.analyze.TopicAnalysisService;
+import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import java.util.Comparator;
 import java.util.List;
 import javax.validation.Valid;
@@ -44,66 +51,121 @@ public class TopicsController extends AbstractController implements TopicsApi {
   private final TopicsService topicsService;
   private final TopicAnalysisService topicAnalysisService;
   private final ClusterMapper clusterMapper;
+  private final AccessControlService accessControlService;
 
 
   @Override
   public Mono<ResponseEntity<TopicDTO>> createTopic(
       String clusterName, @Valid Mono<TopicCreationDTO> topicCreation, ServerWebExchange exchange) {
-    return topicsService.createTopic(getCluster(clusterName), topicCreation)
-        .map(clusterMapper::toTopic)
-        .map(s -> new ResponseEntity<>(s, HttpStatus.OK))
-        .switchIfEmpty(Mono.just(ResponseEntity.notFound().build()));
+
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .topicActions(CREATE)
+        .build());
+
+    return validateAccess.then(
+        topicsService.createTopic(getCluster(clusterName), topicCreation)
+            .map(clusterMapper::toTopic)
+            .map(s -> new ResponseEntity<>(s, HttpStatus.OK))
+            .switchIfEmpty(Mono.just(ResponseEntity.notFound().build()))
+    );
   }

   @Override
   public Mono<ResponseEntity<TopicDTO>> recreateTopic(String clusterName,
-                                                      String topicName, ServerWebExchange serverWebExchange) {
-    return topicsService.recreateTopic(getCluster(clusterName), topicName)
-        .map(clusterMapper::toTopic)
-        .map(s -> new ResponseEntity<>(s, HttpStatus.CREATED));
+                                                      String topicName, ServerWebExchange exchange) {
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .topic(topicName)
+        .topicActions(VIEW, CREATE, DELETE)
+        .build());
+
+    return validateAccess.then(
+        topicsService.recreateTopic(getCluster(clusterName), topicName)
+            .map(clusterMapper::toTopic)
+            .map(s -> new ResponseEntity<>(s, HttpStatus.CREATED))
+    );
   }

   @Override
   public Mono<ResponseEntity<TopicDTO>> cloneTopic(
       String clusterName, String topicName, String newTopicName, ServerWebExchange exchange) {
-    return topicsService.cloneTopic(getCluster(clusterName), topicName, newTopicName)
+
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .topic(topicName)
+        .topicActions(VIEW, CREATE)
+        .build());
+
+    return validateAccess.then(topicsService.cloneTopic(getCluster(clusterName), topicName, newTopicName)
         .map(clusterMapper::toTopic)
-        .map(s -> new ResponseEntity<>(s, HttpStatus.CREATED));
+        .map(s -> new ResponseEntity<>(s, HttpStatus.CREATED))
+    );
   }

   @Override
   public Mono<ResponseEntity<Void>> deleteTopic(
       String clusterName, String topicName, ServerWebExchange exchange) {
-    return topicsService.deleteTopic(getCluster(clusterName), topicName).map(ResponseEntity::ok);
+
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .topic(topicName)
+        .topicActions(DELETE)
+        .build());
+
+    return validateAccess.then(
+        topicsService.deleteTopic(getCluster(clusterName), topicName).map(ResponseEntity::ok)
+    );
   }


   @Override
   public Mono<ResponseEntity<Flux<TopicConfigDTO>>> getTopicConfigs(
       String clusterName, String topicName, ServerWebExchange exchange) {
-    return topicsService.getTopicConfigs(getCluster(clusterName), topicName)
-        .map(lst -> lst.stream()
-            .map(InternalTopicConfig::from)
-            .map(clusterMapper::toTopicConfig)
-            .collect(toList()))
-        .map(Flux::fromIterable)
-        .map(ResponseEntity::ok);
+
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .topic(topicName)
+        .topicActions(VIEW)
+        .build());
+
+    return validateAccess.then(
+        topicsService.getTopicConfigs(getCluster(clusterName), topicName)
+            .map(lst -> lst.stream()
+                .map(InternalTopicConfig::from)
+                .map(clusterMapper::toTopicConfig)
+                .collect(toList()))
+            .map(Flux::fromIterable)
+            .map(ResponseEntity::ok)
+    );
   }

   @Override
   public Mono<ResponseEntity<TopicDetailsDTO>> getTopicDetails(
       String clusterName, String topicName, ServerWebExchange exchange) {
-    return topicsService.getTopicDetails(getCluster(clusterName), topicName)
-        .map(clusterMapper::toTopicDetails)
-        .map(ResponseEntity::ok);
+
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .topic(topicName)
+        .topicActions(VIEW)
+        .build());
+
+    return validateAccess.then(
+        topicsService.getTopicDetails(getCluster(clusterName), topicName)
+            .map(clusterMapper::toTopicDetails)
+            .map(ResponseEntity::ok)
+    );
   }

-  public Mono<ResponseEntity<TopicsResponseDTO>> getTopics(String clusterName, @Valid Integer page,
+  @Override
+  public Mono<ResponseEntity<TopicsResponseDTO>> getTopics(String clusterName,
+                                                           @Valid Integer page,
                                                            @Valid Integer perPage,
                                                            @Valid Boolean showInternal,
                                                            @Valid String search,
                                                            @Valid TopicColumnsToSortDTO orderBy,
                                                            @Valid SortOrderDTO sortOrder,
                                                            ServerWebExchange exchange) {
+
     return topicsService.getTopicsForPagination(getCluster(clusterName))
         .flatMap(existingTopics -> {
           int pageSize = perPage != null && perPage > 0 ? perPage : DEFAULT_PAGE_SIZE;
@@ -115,7 +177,7 @@ public class TopicsController extends AbstractController implements TopicsApi {
                   || showInternal != null && showInternal)
               .filter(topic -> search == null || StringUtils.contains(topic.getName(), search))
               .sorted(comparator)
-              .collect(toList());
+              .toList();
           var totalPages = (filtered.size() / pageSize)
               + (filtered.size() % pageSize == 0 ? 0 : 1);

@@ -126,42 +188,34 @@ public class TopicsController extends AbstractController implements TopicsApi {
               .collect(toList());

           return topicsService.loadTopics(getCluster(clusterName), topicsPage)
+              .flatMapMany(Flux::fromIterable)
+              .filterWhen(dto -> accessControlService.isTopicAccessible(dto, clusterName))
+              .collectList()
               .map(topicsToRender ->
                   new TopicsResponseDTO()
                       .topics(topicsToRender.stream().map(clusterMapper::toTopic).collect(toList()))
                       .pageCount(totalPages));
-        }).map(ResponseEntity::ok);
-  }
-
-  private Comparator<InternalTopic> getComparatorForTopic(
-      TopicColumnsToSortDTO orderBy) {
-    var defaultComparator = Comparator.comparing(InternalTopic::getName);
-    if (orderBy == null) {
-      return defaultComparator;
-    }
-    switch (orderBy) {
-      case TOTAL_PARTITIONS:
-        return Comparator.comparing(InternalTopic::getPartitionCount);
-      case OUT_OF_SYNC_REPLICAS:
-        return Comparator.comparing(t -> t.getReplicas() - t.getInSyncReplicas());
-      case REPLICATION_FACTOR:
-        return Comparator.comparing(InternalTopic::getReplicationFactor);
-      case SIZE:
-        return Comparator.comparing(InternalTopic::getSegmentSize);
-      case NAME:
-      default:
-        return defaultComparator;
-    }
+        })
+        .map(ResponseEntity::ok);
   }

   @Override
   public Mono<ResponseEntity<TopicDTO>> updateTopic(
-      String clusterId, String topicName, @Valid Mono<TopicUpdateDTO> topicUpdate,
+      String clusterName, String topicName, @Valid Mono<TopicUpdateDTO> topicUpdate,
       ServerWebExchange exchange) {
-    return topicsService
-        .updateTopic(getCluster(clusterId), topicName, topicUpdate)
-        .map(clusterMapper::toTopic)
-        .map(ResponseEntity::ok);
+
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .topic(topicName)
+        .topicActions(VIEW, EDIT)
+        .build());
+
+    return validateAccess.then(
+        topicsService
+            .updateTopic(getCluster(clusterName), topicName, topicUpdate)
+            .map(clusterMapper::toTopic)
+            .map(ResponseEntity::ok)
+    );
   }
   }

   @Override
@@ -169,9 +223,18 @@ public class TopicsController extends AbstractController implements TopicsApi {
       String clusterName, String topicName,
       Mono<PartitionsIncreaseDTO> partitionsIncrease,
       ServerWebExchange exchange) {
-    return partitionsIncrease.flatMap(partitions ->
+
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .topic(topicName)
+        .topicActions(VIEW, EDIT)
+        .build());
+
+    return validateAccess.then(
+        partitionsIncrease.flatMap(partitions ->
             topicsService.increaseTopicPartitions(getCluster(clusterName), topicName, partitions)
-        ).map(ResponseEntity::ok);
+        ).map(ResponseEntity::ok)
+    );
   }

   @Override
@@ -179,23 +242,48 @@ public class TopicsController extends AbstractController implements TopicsApi {
       String clusterName, String topicName,
       Mono<ReplicationFactorChangeDTO> replicationFactorChange,
       ServerWebExchange exchange) {
-    return replicationFactorChange
-        .flatMap(rfc ->
-            topicsService.changeReplicationFactor(getCluster(clusterName), topicName, rfc))
-        .map(ResponseEntity::ok);
+
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .topic(topicName)
+        .topicActions(VIEW, EDIT)
+        .build());
+
+    return validateAccess.then(
+        replicationFactorChange
+            .flatMap(rfc ->
+                topicsService.changeReplicationFactor(getCluster(clusterName), topicName, rfc))
+            .map(ResponseEntity::ok)
+    );
   }

   @Override
   public Mono<ResponseEntity<Void>> analyzeTopic(String clusterName, String topicName, ServerWebExchange exchange) {
-    return topicAnalysisService.analyze(getCluster(clusterName), topicName)
-        .thenReturn(ResponseEntity.ok().build());
+
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .topic(topicName)
+        .topicActions(MESSAGES_READ)
+        .build());
+
+    return validateAccess.then(
+        topicAnalysisService.analyze(getCluster(clusterName), topicName)
+            .thenReturn(ResponseEntity.ok().build())
+    );
   }

   @Override
   public Mono<ResponseEntity<Void>> cancelTopicAnalysis(String clusterName, String topicName,
-                                                       ServerWebExchange exchange) {
+                                                        ServerWebExchange exchange) {
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .topic(topicName)
+        .topicActions(MESSAGES_READ)
+        .build());
+
     topicAnalysisService.cancelAnalysis(getCluster(clusterName), topicName);
-    return Mono.just(ResponseEntity.ok().build());
+
+    return validateAccess.thenReturn(ResponseEntity.ok().build());
   }


@@ -203,10 +291,36 @@ public class TopicsController extends AbstractController implements TopicsApi {
   public Mono<ResponseEntity<TopicAnalysisDTO>> getTopicAnalysis(String clusterName,
                                                                  String topicName,
                                                                  ServerWebExchange exchange) {
-    return Mono.just(
-        topicAnalysisService.getTopicAnalysis(getCluster(clusterName), topicName)
-            .map(ResponseEntity::ok)
-            .orElseGet(() -> ResponseEntity.notFound().build())
-    );
+
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .topic(topicName)
+        .topicActions(MESSAGES_READ)
+        .build());
+
+    return validateAccess.thenReturn(topicAnalysisService.getTopicAnalysis(getCluster(clusterName), topicName)
+        .map(ResponseEntity::ok)
+        .orElseGet(() -> ResponseEntity.notFound().build()));
+  }
+
+  private Comparator<InternalTopic> getComparatorForTopic(
+      TopicColumnsToSortDTO orderBy) {
+    var defaultComparator = Comparator.comparing(InternalTopic::getName);
+    if (orderBy == null) {
+      return defaultComparator;
+    }
+    switch (orderBy) {
+      case TOTAL_PARTITIONS:
+        return Comparator.comparing(InternalTopic::getPartitionCount);
+      case OUT_OF_SYNC_REPLICAS:
+        return Comparator.comparing(t -> t.getReplicas() - t.getInSyncReplicas());
+      case REPLICATION_FACTOR:
+        return Comparator.comparing(InternalTopic::getReplicationFactor);
+      case SIZE:
+        return Comparator.comparing(InternalTopic::getSegmentSize);
+      case NAME:
+      default:
+        return defaultComparator;
+    }
   }
   }
 }
+ 9 - 5
kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/AbstractEmitter.java

@@ -4,6 +4,7 @@ import com.provectus.kafka.ui.model.TopicMessageDTO;
 import com.provectus.kafka.ui.model.TopicMessageEventDTO;
 import com.provectus.kafka.ui.model.TopicMessageEventDTO;
 import com.provectus.kafka.ui.model.TopicMessagePhaseDTO;
 import com.provectus.kafka.ui.model.TopicMessagePhaseDTO;
 import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
 import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
+import com.provectus.kafka.ui.util.PollingThrottler;
 import java.time.Duration;
 import java.time.Duration;
 import java.time.Instant;
 import java.time.Instant;
 import org.apache.kafka.clients.consumer.Consumer;
 import org.apache.kafka.clients.consumer.Consumer;
@@ -24,9 +25,11 @@ public abstract class AbstractEmitter {
 
 
   private final ConsumerRecordDeserializer recordDeserializer;
   private final ConsumerRecordDeserializer recordDeserializer;
   private final ConsumingStats consumingStats = new ConsumingStats();
   private final ConsumingStats consumingStats = new ConsumingStats();
+  private final PollingThrottler throttler;
 
 
-  protected AbstractEmitter(ConsumerRecordDeserializer recordDeserializer) {
+  protected AbstractEmitter(ConsumerRecordDeserializer recordDeserializer, PollingThrottler throttler) {
     this.recordDeserializer = recordDeserializer;
     this.recordDeserializer = recordDeserializer;
+    this.throttler = throttler;
   }
   }
 
 
   protected ConsumerRecords<Bytes, Bytes> poll(
   protected ConsumerRecords<Bytes, Bytes> poll(
@@ -39,7 +42,8 @@ public abstract class AbstractEmitter {
     Instant start = Instant.now();
     Instant start = Instant.now();
     ConsumerRecords<Bytes, Bytes> records = consumer.poll(timeout);
     ConsumerRecords<Bytes, Bytes> records = consumer.poll(timeout);
     Instant finish = Instant.now();
     Instant finish = Instant.now();
-    sendConsuming(sink, records, Duration.between(start, finish).toMillis());
+    int polledBytes = sendConsuming(sink, records, Duration.between(start, finish).toMillis());
+    throttler.throttleAfterPoll(polledBytes);
     return records;
     return records;
   }
   }
 
 
@@ -61,10 +65,10 @@ public abstract class AbstractEmitter {
     );
     );
   }
   }
 
 
-  protected void sendConsuming(FluxSink<TopicMessageEventDTO> sink,
+  protected int sendConsuming(FluxSink<TopicMessageEventDTO> sink,
                                ConsumerRecords<Bytes, Bytes> records,
                                ConsumerRecords<Bytes, Bytes> records,
                                long elapsed) {
                                long elapsed) {
-    consumingStats.sendConsumingEvt(sink, records, elapsed, getFilterApplyErrors(sink));
+    return consumingStats.sendConsumingEvt(sink, records, elapsed, getFilterApplyErrors(sink));
   }
   }
 
 
   protected void sendFinishStatsAndCompleteSink(FluxSink<TopicMessageEventDTO> sink) {
   protected void sendFinishStatsAndCompleteSink(FluxSink<TopicMessageEventDTO> sink) {
@@ -78,4 +82,4 @@ public abstract class AbstractEmitter {
         .<Number>map(MessageFilterStats::getFilterApplyErrors)
         .<Number>map(MessageFilterStats::getFilterApplyErrors)
         .orElse(0);
         .orElse(0);
   }
   }
-}
+}

+ 9 - 2
kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java

@@ -3,6 +3,7 @@ package com.provectus.kafka.ui.emitter;
 import com.provectus.kafka.ui.model.ConsumerPosition;
 import com.provectus.kafka.ui.model.ConsumerPosition;
 import com.provectus.kafka.ui.model.TopicMessageEventDTO;
 import com.provectus.kafka.ui.model.TopicMessageEventDTO;
 import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
 import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
+import com.provectus.kafka.ui.util.PollingThrottler;
 import java.time.Duration;
 import java.time.Duration;
 import java.util.ArrayList;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Collections;
@@ -16,6 +17,7 @@ import org.apache.kafka.clients.consumer.Consumer;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.clients.consumer.KafkaConsumer;
 import org.apache.kafka.clients.consumer.KafkaConsumer;
 import org.apache.kafka.common.TopicPartition;
 import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.errors.InterruptException;
 import org.apache.kafka.common.utils.Bytes;
 import org.apache.kafka.common.utils.Bytes;
 import reactor.core.publisher.FluxSink;
 import reactor.core.publisher.FluxSink;
 
 
@@ -34,8 +36,9 @@ public class BackwardRecordEmitter
       Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier,
       Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier,
       ConsumerPosition consumerPosition,
       ConsumerPosition consumerPosition,
       int messagesPerPage,
       int messagesPerPage,
-      ConsumerRecordDeserializer recordDeserializer) {
-    super(recordDeserializer);
+      ConsumerRecordDeserializer recordDeserializer,
+      PollingThrottler throttler) {
+    super(recordDeserializer, throttler);
     this.consumerPosition = consumerPosition;
     this.consumerPosition = consumerPosition;
     this.messagesPerPage = messagesPerPage;
     this.messagesPerPage = messagesPerPage;
     this.consumerSupplier = consumerSupplier;
     this.consumerSupplier = consumerSupplier;
@@ -43,6 +46,7 @@ public class BackwardRecordEmitter
 
 
   @Override
   @Override
   public void accept(FluxSink<TopicMessageEventDTO> sink) {
   public void accept(FluxSink<TopicMessageEventDTO> sink) {
+    log.debug("Starting backward polling for {}", consumerPosition);
     try (KafkaConsumer<Bytes, Bytes> consumer = consumerSupplier.get()) {
     try (KafkaConsumer<Bytes, Bytes> consumer = consumerSupplier.get()) {
       sendPhase(sink, "Created consumer");
       sendPhase(sink, "Created consumer");
 
 
@@ -82,6 +86,9 @@ public class BackwardRecordEmitter
       }
       }
       sendFinishStatsAndCompleteSink(sink);
       sendFinishStatsAndCompleteSink(sink);
       log.debug("Polling finished");
       log.debug("Polling finished");
+    } catch (InterruptException kafkaInterruptException) {
+      log.debug("Polling finished due to thread interruption");
+      sink.complete();
     } catch (Exception e) {
     } catch (Exception e) {
       log.error("Error occurred while consuming records", e);
       log.error("Error occurred while consuming records", e);
       sink.error(e);
       sink.error(e);

+ 8 - 11
kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ConsumingStats.java

@@ -2,9 +2,8 @@ package com.provectus.kafka.ui.emitter;
 
 
 import com.provectus.kafka.ui.model.TopicMessageConsumingDTO;
 import com.provectus.kafka.ui.model.TopicMessageConsumingDTO;
 import com.provectus.kafka.ui.model.TopicMessageEventDTO;
 import com.provectus.kafka.ui.model.TopicMessageEventDTO;
-import org.apache.kafka.clients.consumer.ConsumerRecord;
+import com.provectus.kafka.ui.util.ConsumerRecordsUtil;
 import org.apache.kafka.clients.consumer.ConsumerRecords;
 import org.apache.kafka.clients.consumer.ConsumerRecords;
-import org.apache.kafka.common.header.Header;
 import org.apache.kafka.common.utils.Bytes;
 import org.apache.kafka.common.utils.Bytes;
 import reactor.core.publisher.FluxSink;
 import reactor.core.publisher.FluxSink;
 
 
@@ -14,18 +13,15 @@ class ConsumingStats {
   private int records = 0;
   private int records = 0;
   private long elapsed = 0;
   private long elapsed = 0;
 
 
-  void sendConsumingEvt(FluxSink<TopicMessageEventDTO> sink,
+  /**
+   * returns bytes polled.
+   */
+  int sendConsumingEvt(FluxSink<TopicMessageEventDTO> sink,
                         ConsumerRecords<Bytes, Bytes> polledRecords,
                         ConsumerRecords<Bytes, Bytes> polledRecords,
                         long elapsed,
                         long elapsed,
                         Number filterApplyErrors) {
                         Number filterApplyErrors) {
-    for (ConsumerRecord<Bytes, Bytes> rec : polledRecords) {
-      for (Header header : rec.headers()) {
-        bytes +=
-            (header.key() != null ? header.key().getBytes().length : 0L)
-                + (header.value() != null ? header.value().length : 0L);
-      }
-      bytes += rec.serializedKeySize() + rec.serializedValueSize();
-    }
+    int polledBytes = ConsumerRecordsUtil.calculatePolledSize(polledRecords);
+    bytes += polledBytes;
     this.records += polledRecords.count();
     this.records += polledRecords.count();
     this.elapsed += elapsed;
     this.elapsed += elapsed;
     sink.next(
     sink.next(
@@ -33,6 +29,7 @@ class ConsumingStats {
             .type(TopicMessageEventDTO.TypeEnum.CONSUMING)
             .type(TopicMessageEventDTO.TypeEnum.CONSUMING)
             .consuming(createConsumingStats(sink, filterApplyErrors))
             .consuming(createConsumingStats(sink, filterApplyErrors))
     );
     );
+    return polledBytes;
   }
   }
 
 
   void sendFinishEvent(FluxSink<TopicMessageEventDTO> sink, Number filterApplyErrors) {
   void sendFinishEvent(FluxSink<TopicMessageEventDTO> sink, Number filterApplyErrors) {
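
The per-record byte counting that used to live inline here is now delegated to ConsumerRecordsUtil.calculatePolledSize. That utility is not shown in this diff; judging from the removed loop it is expected to behave like the sketch below (header keys and values plus serialized key and value sizes).

    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.common.header.Header;
    import org.apache.kafka.common.utils.Bytes;

    // Expected behaviour of ConsumerRecordsUtil.calculatePolledSize, reconstructed from the
    // inline loop removed above; the real utility added by this commit may differ in details.
    public class ConsumerRecordsUtil {
      public static int calculatePolledSize(ConsumerRecords<Bytes, Bytes> records) {
        int bytes = 0;
        for (ConsumerRecord<Bytes, Bytes> rec : records) {
          for (Header header : rec.headers()) {
            bytes += (header.key() != null ? header.key().getBytes().length : 0)
                + (header.value() != null ? header.value().length : 0);
          }
          bytes += rec.serializedKeySize() + rec.serializedValueSize(); // mirrors the original accounting
        }
        return bytes;
      }
    }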

+ 11 - 4
kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java

@@ -3,11 +3,13 @@ package com.provectus.kafka.ui.emitter;
 import com.provectus.kafka.ui.model.ConsumerPosition;
 import com.provectus.kafka.ui.model.ConsumerPosition;
 import com.provectus.kafka.ui.model.TopicMessageEventDTO;
 import com.provectus.kafka.ui.model.TopicMessageEventDTO;
 import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
 import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
+import com.provectus.kafka.ui.util.PollingThrottler;
 import java.util.function.Supplier;
 import java.util.function.Supplier;
 import lombok.extern.slf4j.Slf4j;
 import lombok.extern.slf4j.Slf4j;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.clients.consumer.ConsumerRecords;
 import org.apache.kafka.clients.consumer.ConsumerRecords;
 import org.apache.kafka.clients.consumer.KafkaConsumer;
 import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.common.errors.InterruptException;
 import org.apache.kafka.common.utils.Bytes;
 import org.apache.kafka.common.utils.Bytes;
 import reactor.core.publisher.FluxSink;
 import reactor.core.publisher.FluxSink;
 
 
@@ -22,14 +24,16 @@ public class ForwardRecordEmitter
   public ForwardRecordEmitter(
   public ForwardRecordEmitter(
       Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier,
       Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier,
       ConsumerPosition position,
       ConsumerPosition position,
-      ConsumerRecordDeserializer recordDeserializer) {
-    super(recordDeserializer);
+      ConsumerRecordDeserializer recordDeserializer,
+      PollingThrottler throttler) {
+    super(recordDeserializer, throttler);
     this.position = position;
     this.position = position;
     this.consumerSupplier = consumerSupplier;
     this.consumerSupplier = consumerSupplier;
   }
   }
 
 
   @Override
   @Override
   public void accept(FluxSink<TopicMessageEventDTO> sink) {
   public void accept(FluxSink<TopicMessageEventDTO> sink) {
+    log.debug("Starting forward polling for {}", position);
     try (KafkaConsumer<Bytes, Bytes> consumer = consumerSupplier.get()) {
     try (KafkaConsumer<Bytes, Bytes> consumer = consumerSupplier.get()) {
       sendPhase(sink, "Assigning partitions");
       sendPhase(sink, "Assigning partitions");
       var seekOperations = SeekOperations.create(consumer, position);
       var seekOperations = SeekOperations.create(consumer, position);
@@ -43,7 +47,7 @@ public class ForwardRecordEmitter
 
 
         sendPhase(sink, "Polling");
         sendPhase(sink, "Polling");
         ConsumerRecords<Bytes, Bytes> records = poll(sink, consumer);
         ConsumerRecords<Bytes, Bytes> records = poll(sink, consumer);
-        log.info("{} records polled", records.count());
+        log.debug("{} records polled", records.count());
         emptyPolls = records.isEmpty() ? emptyPolls + 1 : 0;
         emptyPolls = records.isEmpty() ? emptyPolls + 1 : 0;
 
 
         for (ConsumerRecord<Bytes, Bytes> msg : records) {
         for (ConsumerRecord<Bytes, Bytes> msg : records) {
@@ -55,7 +59,10 @@ public class ForwardRecordEmitter
         }
         }
       }
       }
       sendFinishStatsAndCompleteSink(sink);
       sendFinishStatsAndCompleteSink(sink);
-      log.info("Polling finished");
+      log.debug("Polling finished");
+    } catch (InterruptException kafkaInterruptException) {
+      log.debug("Polling finished due to thread interruption");
+      sink.complete();
     } catch (Exception e) {
     } catch (Exception e) {
       log.error("Error occurred while consuming records", e);
       log.error("Error occurred while consuming records", e);
       sink.error(e);
       sink.error(e);
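
Both record emitters now take the throttler as an extra constructor argument and complete the sink quietly when the polling thread is interrupted. A hedged wiring sketch follows; consumerSupplier, position, deserializer and throttler are placeholders supplied by the calling service and are not part of this diff:

    // Hedged construction sketch for the new ForwardRecordEmitter signature.
    ForwardRecordEmitter emitter = new ForwardRecordEmitter(
        consumerSupplier,   // Supplier<KafkaConsumer<Bytes, Bytes>> (placeholder)
        position,           // ConsumerPosition to start polling from (placeholder)
        deserializer,       // ConsumerRecordDeserializer for key/value rendering (placeholder)
        throttler);         // PollingThrottler introduced by this commit (placeholder)
    Flux<TopicMessageEventDTO> events = Flux.create(emitter);  // the emitter is a Consumer<FluxSink<...>>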

+ 6 - 3
kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/TailingEmitter.java

@@ -3,6 +3,7 @@ package com.provectus.kafka.ui.emitter;
 import com.provectus.kafka.ui.model.ConsumerPosition;
 import com.provectus.kafka.ui.model.ConsumerPosition;
 import com.provectus.kafka.ui.model.TopicMessageEventDTO;
 import com.provectus.kafka.ui.model.TopicMessageEventDTO;
 import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
 import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
+import com.provectus.kafka.ui.util.PollingThrottler;
 import java.util.HashMap;
 import java.util.HashMap;
 import java.util.function.Supplier;
 import java.util.function.Supplier;
 import lombok.extern.slf4j.Slf4j;
 import lombok.extern.slf4j.Slf4j;
@@ -20,16 +21,17 @@ public class TailingEmitter extends AbstractEmitter
 
 
   public TailingEmitter(Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier,
   public TailingEmitter(Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier,
                         ConsumerPosition consumerPosition,
                         ConsumerPosition consumerPosition,
-                        ConsumerRecordDeserializer recordDeserializer) {
-    super(recordDeserializer);
+                        ConsumerRecordDeserializer recordDeserializer,
+                        PollingThrottler throttler) {
+    super(recordDeserializer, throttler);
     this.consumerSupplier = consumerSupplier;
     this.consumerSupplier = consumerSupplier;
     this.consumerPosition = consumerPosition;
     this.consumerPosition = consumerPosition;
   }
   }
 
 
   @Override
   @Override
   public void accept(FluxSink<TopicMessageEventDTO> sink) {
   public void accept(FluxSink<TopicMessageEventDTO> sink) {
+    log.debug("Starting tailing polling for {}", consumerPosition);
     try (KafkaConsumer<Bytes, Bytes> consumer = consumerSupplier.get()) {
     try (KafkaConsumer<Bytes, Bytes> consumer = consumerSupplier.get()) {
-      log.debug("Starting topic tailing");
       assignAndSeek(consumer);
       assignAndSeek(consumer);
       while (!sink.isCancelled()) {
       while (!sink.isCancelled()) {
         sendPhase(sink, "Polling");
         sendPhase(sink, "Polling");
@@ -39,6 +41,7 @@ public class TailingEmitter extends AbstractEmitter
       sink.complete();
       sink.complete();
       log.debug("Tailing finished");
       log.debug("Tailing finished");
     } catch (InterruptException kafkaInterruptException) {
     } catch (InterruptException kafkaInterruptException) {
+      log.debug("Tailing finished due to thread interruption");
       sink.complete();
       sink.complete();
     } catch (Exception e) {
     } catch (Exception e) {
       log.error("Error consuming {}", consumerPosition, e);
       log.error("Error consuming {}", consumerPosition, e);

+ 2 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/ErrorCode.java

@@ -7,6 +7,8 @@ import org.springframework.http.HttpStatus;
 
 
 public enum ErrorCode {
 public enum ErrorCode {
 
 
+  FORBIDDEN(403, HttpStatus.FORBIDDEN),
+
   UNEXPECTED(5000, HttpStatus.INTERNAL_SERVER_ERROR),
   UNEXPECTED(5000, HttpStatus.INTERNAL_SERVER_ERROR),
   KSQL_API_ERROR(5001, HttpStatus.INTERNAL_SERVER_ERROR),
   KSQL_API_ERROR(5001, HttpStatus.INTERNAL_SERVER_ERROR),
   BINDING_FAIL(4001, HttpStatus.BAD_REQUEST),
   BINDING_FAIL(4001, HttpStatus.BAD_REQUEST),

+ 2 - 2
kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/SchemaCompatibilityException.java

@@ -1,8 +1,8 @@
 package com.provectus.kafka.ui.exception;
 package com.provectus.kafka.ui.exception;
 
 
 public class SchemaCompatibilityException extends CustomBaseException {
 public class SchemaCompatibilityException extends CustomBaseException {
-  public SchemaCompatibilityException(String message) {
-    super(message);
+  public SchemaCompatibilityException() {
+    super("Schema being registered is incompatible with an earlier schema");
   }
   }
 
 
   @Override
   @Override

+ 0 - 12
kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/SchemaTypeNotSupportedException.java

@@ -1,12 +0,0 @@
-package com.provectus.kafka.ui.exception;
-
-public class SchemaTypeNotSupportedException extends UnprocessableEntityException {
-
-  private static final String REQUIRED_SCHEMA_REGISTRY_VERSION = "5.5.0";
-
-  public SchemaTypeNotSupportedException() {
-    super(String.format("Current version of Schema Registry does "
-        + "not support provided schema type,"
-        + " version %s or later is required here.", REQUIRED_SCHEMA_REGISTRY_VERSION));
-  }
-}

+ 7 - 81
kafka-ui-api/src/main/java/com/provectus/kafka/ui/mapper/ClusterMapper.java

@@ -2,30 +2,25 @@ package com.provectus.kafka.ui.mapper;
 
 
 import com.provectus.kafka.ui.config.ClustersProperties;
 import com.provectus.kafka.ui.config.ClustersProperties;
 import com.provectus.kafka.ui.model.BrokerConfigDTO;
 import com.provectus.kafka.ui.model.BrokerConfigDTO;
+import com.provectus.kafka.ui.model.BrokerDTO;
 import com.provectus.kafka.ui.model.BrokerDiskUsageDTO;
 import com.provectus.kafka.ui.model.BrokerDiskUsageDTO;
 import com.provectus.kafka.ui.model.BrokerMetricsDTO;
 import com.provectus.kafka.ui.model.BrokerMetricsDTO;
 import com.provectus.kafka.ui.model.ClusterDTO;
 import com.provectus.kafka.ui.model.ClusterDTO;
 import com.provectus.kafka.ui.model.ClusterMetricsDTO;
 import com.provectus.kafka.ui.model.ClusterMetricsDTO;
 import com.provectus.kafka.ui.model.ClusterStatsDTO;
 import com.provectus.kafka.ui.model.ClusterStatsDTO;
-import com.provectus.kafka.ui.model.CompatibilityCheckResponseDTO;
-import com.provectus.kafka.ui.model.CompatibilityLevelDTO;
 import com.provectus.kafka.ui.model.ConfigSourceDTO;
 import com.provectus.kafka.ui.model.ConfigSourceDTO;
 import com.provectus.kafka.ui.model.ConfigSynonymDTO;
 import com.provectus.kafka.ui.model.ConfigSynonymDTO;
 import com.provectus.kafka.ui.model.ConnectDTO;
 import com.provectus.kafka.ui.model.ConnectDTO;
-import com.provectus.kafka.ui.model.FailoverUrlList;
 import com.provectus.kafka.ui.model.Feature;
 import com.provectus.kafka.ui.model.Feature;
+import com.provectus.kafka.ui.model.InternalBroker;
 import com.provectus.kafka.ui.model.InternalBrokerConfig;
 import com.provectus.kafka.ui.model.InternalBrokerConfig;
 import com.provectus.kafka.ui.model.InternalBrokerDiskUsage;
 import com.provectus.kafka.ui.model.InternalBrokerDiskUsage;
 import com.provectus.kafka.ui.model.InternalClusterState;
 import com.provectus.kafka.ui.model.InternalClusterState;
-import com.provectus.kafka.ui.model.InternalKsqlServer;
 import com.provectus.kafka.ui.model.InternalPartition;
 import com.provectus.kafka.ui.model.InternalPartition;
 import com.provectus.kafka.ui.model.InternalReplica;
 import com.provectus.kafka.ui.model.InternalReplica;
-import com.provectus.kafka.ui.model.InternalSchemaRegistry;
 import com.provectus.kafka.ui.model.InternalTopic;
 import com.provectus.kafka.ui.model.InternalTopic;
 import com.provectus.kafka.ui.model.InternalTopicConfig;
 import com.provectus.kafka.ui.model.InternalTopicConfig;
 import com.provectus.kafka.ui.model.KafkaAclDTO;
 import com.provectus.kafka.ui.model.KafkaAclDTO;
-import com.provectus.kafka.ui.model.KafkaCluster;
-import com.provectus.kafka.ui.model.KafkaConnectCluster;
 import com.provectus.kafka.ui.model.MetricDTO;
 import com.provectus.kafka.ui.model.MetricDTO;
 import com.provectus.kafka.ui.model.Metrics;
 import com.provectus.kafka.ui.model.Metrics;
 import com.provectus.kafka.ui.model.PartitionDTO;
 import com.provectus.kafka.ui.model.PartitionDTO;
@@ -33,14 +28,10 @@ import com.provectus.kafka.ui.model.ReplicaDTO;
 import com.provectus.kafka.ui.model.TopicConfigDTO;
 import com.provectus.kafka.ui.model.TopicConfigDTO;
 import com.provectus.kafka.ui.model.TopicDTO;
 import com.provectus.kafka.ui.model.TopicDTO;
 import com.provectus.kafka.ui.model.TopicDetailsDTO;
 import com.provectus.kafka.ui.model.TopicDetailsDTO;
-import com.provectus.kafka.ui.model.schemaregistry.InternalCompatibilityCheck;
-import com.provectus.kafka.ui.model.schemaregistry.InternalCompatibilityLevel;
+import com.provectus.kafka.ui.service.masking.DataMasking;
 import com.provectus.kafka.ui.service.metrics.RawMetric;
 import com.provectus.kafka.ui.service.metrics.RawMetric;
-import java.util.Arrays;
-import java.util.Collections;
 import java.util.List;
 import java.util.List;
 import java.util.Map;
 import java.util.Map;
-import java.util.Properties;
 import java.util.stream.Collectors;
 import java.util.stream.Collectors;
 import org.apache.kafka.clients.admin.ConfigEntry;
 import org.apache.kafka.clients.admin.ConfigEntry;
 import org.apache.kafka.common.acl.AccessControlEntry;
 import org.apache.kafka.common.acl.AccessControlEntry;
@@ -52,19 +43,12 @@ import org.apache.kafka.common.resource.ResourcePattern;
 import org.apache.kafka.common.resource.ResourceType;
 import org.apache.kafka.common.resource.ResourceType;
 import org.mapstruct.Mapper;
 import org.mapstruct.Mapper;
 import org.mapstruct.Mapping;
 import org.mapstruct.Mapping;
-import org.mapstruct.Named;
 
 
 @Mapper(componentModel = "spring")
 @Mapper(componentModel = "spring")
 public interface ClusterMapper {
 public interface ClusterMapper {
 
 
   ClusterDTO toCluster(InternalClusterState clusterState);
   ClusterDTO toCluster(InternalClusterState clusterState);
 
 
-  @Mapping(target = "properties", source = "properties", qualifiedByName = "setProperties")
-  @Mapping(target = "schemaRegistry", source = ".", qualifiedByName = "setSchemaRegistry")
-  @Mapping(target = "ksqldbServer", source = ".", qualifiedByName = "setKsqldbServer")
-  @Mapping(target = "metricsConfig", source = "metrics")
-  KafkaCluster toKafkaCluster(ClustersProperties.Cluster clusterProperties);
-
   ClusterStatsDTO toClusterStats(InternalClusterState clusterState);
   ClusterStatsDTO toClusterStats(InternalClusterState clusterState);
 
 
   default ClusterMetricsDTO toClusterMetrics(Metrics metrics) {
   default ClusterMetricsDTO toClusterMetrics(Metrics metrics) {
@@ -107,54 +91,7 @@ public interface ClusterMapper {
 
 
   PartitionDTO toPartition(InternalPartition topic);
   PartitionDTO toPartition(InternalPartition topic);
 
 
-  @Named("setSchemaRegistry")
-  default InternalSchemaRegistry setSchemaRegistry(ClustersProperties.Cluster clusterProperties) {
-    if (clusterProperties == null
-        || clusterProperties.getSchemaRegistry() == null) {
-      return null;
-    }
-
-    InternalSchemaRegistry.InternalSchemaRegistryBuilder internalSchemaRegistry =
-        InternalSchemaRegistry.builder();
-
-    internalSchemaRegistry.url(
-        clusterProperties.getSchemaRegistry() != null
-            ? new FailoverUrlList(Arrays.asList(clusterProperties.getSchemaRegistry().split(",")))
-            : new FailoverUrlList(Collections.emptyList())
-    );
-
-    if (clusterProperties.getSchemaRegistryAuth() != null) {
-      internalSchemaRegistry.username(clusterProperties.getSchemaRegistryAuth().getUsername());
-      internalSchemaRegistry.password(clusterProperties.getSchemaRegistryAuth().getPassword());
-    }
-
-    if (clusterProperties.getSchemaRegistrySsl() != null) {
-      internalSchemaRegistry.keystoreLocation(clusterProperties.getSchemaRegistrySsl().getKeystoreLocation());
-      internalSchemaRegistry.keystorePassword(clusterProperties.getSchemaRegistrySsl().getKeystorePassword());
-      internalSchemaRegistry.truststoreLocation(clusterProperties.getSchemaRegistrySsl().getTruststoreLocation());
-      internalSchemaRegistry.truststorePassword(clusterProperties.getSchemaRegistrySsl().getTruststorePassword());
-    }
-
-    return internalSchemaRegistry.build();
-  }
-
-  @Named("setKsqldbServer")
-  default InternalKsqlServer setKsqldbServer(ClustersProperties.Cluster clusterProperties) {
-    if (clusterProperties == null
-            || clusterProperties.getKsqldbServer() == null) {
-      return null;
-    }
-
-    InternalKsqlServer.InternalKsqlServerBuilder internalKsqlServerBuilder =
-            InternalKsqlServer.builder().url(clusterProperties.getKsqldbServer());
-
-    if (clusterProperties.getKsqldbServerAuth() != null) {
-      internalKsqlServerBuilder.username(clusterProperties.getKsqldbServerAuth().getUsername());
-      internalKsqlServerBuilder.password(clusterProperties.getKsqldbServerAuth().getPassword());
-    }
-
-    return internalKsqlServerBuilder.build();
-  }
+  BrokerDTO toBrokerDto(InternalBroker broker);
 
 
   TopicDetailsDTO toTopicDetails(InternalTopic topic);
   TopicDetailsDTO toTopicDetails(InternalTopic topic);
 
 
@@ -164,16 +101,10 @@ public interface ClusterMapper {
 
 
   ReplicaDTO toReplica(InternalReplica replica);
   ReplicaDTO toReplica(InternalReplica replica);
 
 
-  ConnectDTO toKafkaConnect(KafkaConnectCluster connect);
+  ConnectDTO toKafkaConnect(ClustersProperties.ConnectCluster connect);
 
 
   List<ClusterDTO.FeaturesEnum> toFeaturesEnum(List<Feature> features);
   List<ClusterDTO.FeaturesEnum> toFeaturesEnum(List<Feature> features);
 
 
-  @Mapping(target = "isCompatible", source = "compatible")
-  CompatibilityCheckResponseDTO toCompatibilityCheckResponse(InternalCompatibilityCheck dto);
-
-  @Mapping(target = "compatibility", source = "compatibilityLevel")
-  CompatibilityLevelDTO toCompatibilityLevelDto(InternalCompatibilityLevel dto);
-
   default List<PartitionDTO> map(Map<Integer, InternalPartition> map) {
   default List<PartitionDTO> map(Map<Integer, InternalPartition> map) {
     return map.values().stream().map(this::toPartition).collect(Collectors.toList());
     return map.values().stream().map(this::toPartition).collect(Collectors.toList());
   }
   }
@@ -186,13 +117,8 @@ public interface ClusterMapper {
     return brokerDiskUsage;
     return brokerDiskUsage;
   }
   }
 
 
-  @Named("setProperties")
-  default Properties setProperties(Properties properties) {
-    Properties copy = new Properties();
-    if (properties != null) {
-      copy.putAll(properties);
-    }
-    return copy;
+  default DataMasking map(List<ClustersProperties.Masking> maskingProperties) {
+    return DataMasking.create(maskingProperties);
   }
   }
 
 
   static AclBinding toAclBinding(KafkaAclDTO dto) {
   static AclBinding toAclBinding(KafkaAclDTO dto) {
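
ClusterMapper now turns masking rules into a DataMasking instance through a MapStruct default method, which MapStruct uses as a custom conversion wherever a generated mapping needs that type. A small usage sketch; the getMasking() accessor name is an assumption, not something shown in this diff:

    // Illustration only; getMasking() is an assumed accessor on the cluster properties.
    List<ClustersProperties.Masking> rules = clusterProperties.getMasking();
    DataMasking masking = clusterMapper.map(rules);  // delegates to DataMasking.create(rules)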

+ 11 - 7
kafka-ui-api/src/main/java/com/provectus/kafka/ui/mapper/ConsumerGroupMapper.java

@@ -89,13 +89,17 @@ public class ConsumerGroupMapper {
             .flatMap(m -> m.getAssignment().stream().map(TopicPartition::topic))
             .flatMap(m -> m.getAssignment().stream().map(TopicPartition::topic))
     ).collect(Collectors.toSet()).size();
     ).collect(Collectors.toSet()).size();
 
 
-    long messagesBehind = c.getOffsets().entrySet().stream()
-        .mapToLong(e ->
-            Optional.ofNullable(c.getEndOffsets())
-                .map(o -> o.get(e.getKey()))
-                .map(o -> o - e.getValue())
-                .orElse(0L)
-        ).sum();
+    Long messagesBehind = null;
+    // messagesBehind should be undefined if no committed offsets found for topic
+    if (!c.getOffsets().isEmpty()) {
+      messagesBehind = c.getOffsets().entrySet().stream()
+          .mapToLong(e ->
+              Optional.ofNullable(c.getEndOffsets())
+                  .map(o -> o.get(e.getKey()))
+                  .map(o -> o - e.getValue())
+                  .orElse(0L)
+          ).sum();
+    }
 
 
     consumerGroup.setMessagesBehind(messagesBehind);
     consumerGroup.setMessagesBehind(messagesBehind);
     consumerGroup.setTopics(numTopics);
     consumerGroup.setTopics(numTopics);
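
messagesBehind is now a nullable Long: when a group has no committed offsets the value stays null instead of reporting a misleading 0. A small illustration with made-up offsets (assumes java.util.Map and org.apache.kafka.common.TopicPartition are imported):

    // Made-up numbers: one partition, committed offset 90, end offset 100.
    Map<TopicPartition, Long> committed = Map.of(new TopicPartition("orders", 0), 90L);
    Map<TopicPartition, Long> endOffsets = Map.of(new TopicPartition("orders", 0), 100L);

    Long messagesBehind = null;                        // stays null when nothing is committed
    if (!committed.isEmpty()) {
      messagesBehind = committed.entrySet().stream()
          .mapToLong(e -> endOffsets.getOrDefault(e.getKey(), e.getValue()) - e.getValue())
          .sum();                                      // 100 - 90 = 10
    }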

+ 37 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/mapper/KafkaSrMapper.java

@@ -0,0 +1,37 @@
+package com.provectus.kafka.ui.mapper;
+
+import com.provectus.kafka.ui.model.CompatibilityCheckResponseDTO;
+import com.provectus.kafka.ui.model.CompatibilityLevelDTO;
+import com.provectus.kafka.ui.model.NewSchemaSubjectDTO;
+import com.provectus.kafka.ui.model.SchemaSubjectDTO;
+import com.provectus.kafka.ui.model.SchemaTypeDTO;
+import com.provectus.kafka.ui.service.SchemaRegistryService;
+import com.provectus.kafka.ui.sr.model.Compatibility;
+import com.provectus.kafka.ui.sr.model.CompatibilityCheckResponse;
+import com.provectus.kafka.ui.sr.model.NewSubject;
+import com.provectus.kafka.ui.sr.model.SchemaType;
+import java.util.Optional;
+import org.mapstruct.Mapper;
+
+
+@Mapper(componentModel = "spring")
+public interface KafkaSrMapper {
+
+  default SchemaSubjectDTO toDto(SchemaRegistryService.SubjectWithCompatibilityLevel s) {
+    return new SchemaSubjectDTO()
+        .id(s.getId())
+        .version(s.getVersion())
+        .subject(s.getSubject())
+        .schema(s.getSchema())
+        .schemaType(SchemaTypeDTO.fromValue(Optional.ofNullable(s.getSchemaType()).orElse(SchemaType.AVRO).getValue()))
+        .compatibilityLevel(s.getCompatibility().toString());
+  }
+
+  CompatibilityCheckResponseDTO toDto(CompatibilityCheckResponse ccr);
+
+  CompatibilityLevelDTO.CompatibilityEnum toDto(Compatibility compatibility);
+
+  NewSubject fromDto(NewSchemaSubjectDTO subjectDto);
+
+  Compatibility fromDto(CompatibilityLevelDTO.CompatibilityEnum dtoEnum);
+}
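
Since this mapper is declared with componentModel = "spring", MapStruct generates the implementation at build time and registers it as a bean. A minimal usage sketch, not part of this change; the surrounding service class and field names are assumptions:

import com.provectus.kafka.ui.model.CompatibilityCheckResponseDTO;
import com.provectus.kafka.ui.sr.model.CompatibilityCheckResponse;
import lombok.RequiredArgsConstructor;
import org.springframework.stereotype.Service;

@Service
@RequiredArgsConstructor
class SchemaCompatibilityFacade {

  private final KafkaSrMapper kafkaSrMapper; // generated MapStruct implementation, injected by Spring

  CompatibilityCheckResponseDTO toApiDto(CompatibilityCheckResponse srResponse) {
    // Field-by-field mapping is generated from matching property names.
    return kafkaSrMapper.toDto(srResponse);
  }
}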

+ 0 - 59
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/FailoverUrlList.java

@@ -1,59 +0,0 @@
-package com.provectus.kafka.ui.model;
-
-import java.time.Instant;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.atomic.AtomicInteger;
-import lombok.experimental.Delegate;
-
-public class FailoverUrlList {
-
-  public static final int DEFAULT_RETRY_GRACE_PERIOD_IN_MS = 5000;
-
-  private final Map<Integer, Instant> failures = new ConcurrentHashMap<>();
-  private final AtomicInteger index = new AtomicInteger(0);
-  @Delegate
-  private final List<String> urls;
-  private final int retryGracePeriodInMs;
-
-  public FailoverUrlList(List<String> urls) {
-    this(urls, DEFAULT_RETRY_GRACE_PERIOD_IN_MS);
-  }
-
-  public FailoverUrlList(List<String> urls, int retryGracePeriodInMs) {
-    if (urls != null && !urls.isEmpty()) {
-      this.urls = new ArrayList<>(urls);
-    } else {
-      throw new IllegalArgumentException("Expected at least one URL to be passed in constructor");
-    }
-    this.retryGracePeriodInMs = retryGracePeriodInMs;
-  }
-
-  public String current() {
-    return this.urls.get(this.index.get());
-  }
-
-  public void fail(String url) {
-    int currentIndex = this.index.get();
-    if ((this.urls.get(currentIndex)).equals(url)) {
-      this.failures.put(currentIndex, Instant.now());
-      this.index.compareAndSet(currentIndex, (currentIndex + 1) % this.urls.size());
-    }
-  }
-
-  public boolean isFailoverAvailable() {
-    var now = Instant.now();
-    return this.urls.size() > this.failures.size()
-            || this.failures
-                    .values()
-                    .stream()
-                    .anyMatch(e -> now.isAfter(e.plusMillis(retryGracePeriodInMs)));
-  }
-
-  @Override
-  public String toString() {
-    return this.urls.toString();
-  }
-}

+ 24 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalBroker.java

@@ -0,0 +1,24 @@
+package com.provectus.kafka.ui.model;
+
+import java.math.BigDecimal;
+import lombok.Data;
+import org.apache.kafka.common.Node;
+
+@Data
+public class InternalBroker {
+
+  private final Integer id;
+  private final String host;
+  private final Integer port;
+  private final BigDecimal bytesInPerSec;
+  private final BigDecimal bytesOutPerSec;
+
+  public InternalBroker(Node node, Statistics statistics) {
+    this.id = node.id();
+    this.host = node.host();
+    this.port = node.port();
+    this.bytesInPerSec = statistics.getMetrics().getBrokerBytesInPerSec().get(node.id());
+    this.bytesOutPerSec = statistics.getMetrics().getBrokerBytesOutPerSec().get(node.id());
+  }
+
+}
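
InternalBroker now carries per-broker throughput pulled from the collected Statistics at construction time. A hedged sketch of building broker views from a cluster description (the statistics variable is assumed to be the same Statistics snapshot used elsewhere in this change):

List<InternalBroker> brokers = statistics.getClusterDescription().getNodes().stream()
    .map(node -> new InternalBroker(node, statistics))
    .collect(Collectors.toList());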

+ 10 - 5
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalClusterState.java

@@ -6,6 +6,7 @@ import java.util.List;
 import java.util.Optional;
 import java.util.stream.Collectors;
 import lombok.Data;
+import org.apache.kafka.common.Node;

 @Data
 public class InternalClusterState {
@@ -37,7 +38,9 @@ public class InternalClusterState {
         .orElse(null);
     topicCount = statistics.getTopicDescriptions().size();
     brokerCount = statistics.getClusterDescription().getNodes().size();
-    activeControllers = statistics.getClusterDescription().getController() != null ? 1 : 0;
+    activeControllers = Optional.ofNullable(statistics.getClusterDescription().getController())
+        .map(Node::id)
+        .orElse(null);
     version = statistics.getVersion();

     if (statistics.getLogDirInfo() != null) {
@@ -53,15 +56,17 @@

     bytesInPerSec = statistics
         .getMetrics()
-        .getBytesInPerSec()
+        .getBrokerBytesInPerSec()
         .values().stream()
-        .reduce(BigDecimal.ZERO, BigDecimal::add);
+        .reduce(BigDecimal::add)
+        .orElse(null);

     bytesOutPerSec = statistics
         .getMetrics()
-        .getBytesOutPerSec()
+        .getBrokerBytesOutPerSec()
         .values().stream()
-        .reduce(BigDecimal.ZERO, BigDecimal::add);
+        .reduce(BigDecimal::add)
+        .orElse(null);

     var partitionsStats = new PartitionsStats(statistics.getTopicDescriptions().values());
     onlinePartitionCount = partitionsStats.getOnlinePartitionCount();
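
Note the semantic change: reduce(BigDecimal.ZERO, BigDecimal::add) returns 0 for an empty metrics map, while reduce(BigDecimal::add).orElse(null) yields null, so the UI can distinguish "metrics not collected" from genuine zero throughput. A small self-contained illustration, not part of the diff:

import java.math.BigDecimal;
import java.util.Map;

class ReduceSemanticsDemo {
  public static void main(String[] args) {
    Map<Integer, BigDecimal> noMetrics = Map.of(); // e.g. metrics collection disabled

    // Old behaviour: identity-based reduce still produces a value for an empty stream.
    BigDecimal oldResult = noMetrics.values().stream()
        .reduce(BigDecimal.ZERO, BigDecimal::add);

    // New behaviour: empty stream -> empty Optional -> null.
    BigDecimal newResult = noMetrics.values().stream()
        .reduce(BigDecimal::add)
        .orElse(null);

    System.out.println(oldResult); // 0
    System.out.println(newResult); // null
  }
}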

+ 0 - 14
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalKsqlServer.java

@@ -1,14 +0,0 @@
-package com.provectus.kafka.ui.model;
-
-import lombok.Builder;
-import lombok.Data;
-import lombok.ToString;
-
-@Data
-@ToString(exclude = "password")
-@Builder(toBuilder = true)
-public class InternalKsqlServer {
-  private final String url;
-  private final String username;
-  private final String password;
-}

+ 0 - 33
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalSchemaRegistry.java

@@ -1,33 +0,0 @@
-package com.provectus.kafka.ui.model;
-
-import lombok.Builder;
-import lombok.Data;
-
-@Data
-@Builder(toBuilder = true)
-public class InternalSchemaRegistry {
-  private final String username;
-  private final String password;
-  private final FailoverUrlList url;
-
-  private final String keystoreLocation;
-  private final String truststoreLocation;
-  private final String keystorePassword;
-  private final String truststorePassword;
-
-  public String getPrimaryNodeUri() {
-    return url.get(0);
-  }
-
-  public String getUri() {
-    return url.current();
-  }
-
-  public void markAsUnavailable(String url) {
-    this.url.fail(url);
-  }
-
-  public boolean isFailoverAvailable() {
-    return this.url.isFailoverAvailable();
-  }
-}

+ 2 - 2
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalTopic.java

@@ -102,8 +102,8 @@ public class InternalTopic {
       topic.segmentSize(segmentStats.getSegmentSize());
     }

-    topic.bytesInPerSec(metrics.getBytesInPerSec().get(topicDescription.name()));
-    topic.bytesOutPerSec(metrics.getBytesOutPerSec().get(topicDescription.name()));
+    topic.bytesInPerSec(metrics.getTopicBytesInPerSec().get(topicDescription.name()));
+    topic.bytesOutPerSec(metrics.getTopicBytesOutPerSec().get(topicDescription.name()));

     topic.topicConfigs(
         configs.stream().map(InternalTopicConfig::from).collect(Collectors.toList()));

+ 16 - 4
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/KafkaCluster.java

@@ -1,7 +1,15 @@
 package com.provectus.kafka.ui.model;

-import java.util.List;
+import com.provectus.kafka.ui.config.ClustersProperties;
+import com.provectus.kafka.ui.connect.api.KafkaConnectClientApi;
+import com.provectus.kafka.ui.service.ksql.KsqlApiClient;
+import com.provectus.kafka.ui.service.masking.DataMasking;
+import com.provectus.kafka.ui.sr.api.KafkaSrClientApi;
+import com.provectus.kafka.ui.util.PollingThrottler;
+import com.provectus.kafka.ui.util.ReactiveFailover;
+import java.util.Map;
 import java.util.Properties;
+import java.util.function.Supplier;
 import lombok.AccessLevel;
 import lombok.AllArgsConstructor;
 import lombok.Builder;
@@ -11,14 +19,18 @@ import lombok.Data;
 @Builder(toBuilder = true)
 @AllArgsConstructor(access = AccessLevel.PRIVATE)
 public class KafkaCluster {
+  private final ClustersProperties.Cluster originalProperties;
+
   private final String name;
   private final String version;
   private final String bootstrapServers;
-  private final InternalSchemaRegistry schemaRegistry;
-  private final InternalKsqlServer ksqldbServer;
-  private final List<KafkaConnectCluster> kafkaConnect;
   private final Properties properties;
   private final boolean readOnly;
   private final boolean disableLogDirsCollection;
   private final MetricsConfig metricsConfig;
+  private final DataMasking masking;
+  private final Supplier<PollingThrottler> throttler;
+  private final ReactiveFailover<KafkaSrClientApi> schemaRegistryClient;
+  private final Map<String, ReactiveFailover<KafkaConnectClientApi>> connectsClients;
+  private final ReactiveFailover<KsqlApiClient> ksqlClient;
 }

+ 0 - 16
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/KafkaConnectCluster.java

@@ -1,16 +0,0 @@
-package com.provectus.kafka.ui.model;
-
-import lombok.AccessLevel;
-import lombok.AllArgsConstructor;
-import lombok.Builder;
-import lombok.Data;
-
-@Data
-@Builder(toBuilder = true)
-@AllArgsConstructor(access = AccessLevel.PRIVATE)
-public class KafkaConnectCluster {
-  private final String name;
-  private final String address;
-  private final String userName;
-  private final String password;
-}

+ 9 - 4
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/Metrics.java

@@ -15,14 +15,19 @@ import lombok.Value;
 @Builder
 @Value
 public class Metrics {
-  Map<String, BigDecimal> bytesInPerSec;
-  Map<String, BigDecimal> bytesOutPerSec;
+
+  Map<Integer, BigDecimal> brokerBytesInPerSec;
+  Map<Integer, BigDecimal> brokerBytesOutPerSec;
+  Map<String, BigDecimal> topicBytesInPerSec;
+  Map<String, BigDecimal> topicBytesOutPerSec;
   Map<Integer, List<RawMetric>> perBrokerMetrics;

   public static Metrics empty() {
     return Metrics.builder()
-        .bytesInPerSec(Map.of())
-        .bytesOutPerSec(Map.of())
+        .brokerBytesInPerSec(Map.of())
+        .brokerBytesOutPerSec(Map.of())
+        .topicBytesInPerSec(Map.of())
+        .topicBytesOutPerSec(Map.of())
         .perBrokerMetrics(Map.of())
         .build();
   }
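
With the split maps, broker throughput is keyed by broker id (Integer) and topic throughput by topic name (String). A hypothetical construction, purely to show the new shape; the ids, topic name and values are illustrative:

Metrics metrics = Metrics.builder()
    .brokerBytesInPerSec(Map.of(1, new BigDecimal("1024.5")))     // broker id -> bytes/sec
    .brokerBytesOutPerSec(Map.of(1, new BigDecimal("512.0")))
    .topicBytesInPerSec(Map.of("orders", new BigDecimal("300")))  // topic name -> bytes/sec
    .topicBytesOutPerSec(Map.of("orders", new BigDecimal("150")))
    .perBrokerMetrics(Map.of())
    .build();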

+ 134 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/rbac/AccessContext.java

@@ -0,0 +1,134 @@
+package com.provectus.kafka.ui.model.rbac;
+
+import com.provectus.kafka.ui.model.rbac.permission.ClusterConfigAction;
+import com.provectus.kafka.ui.model.rbac.permission.ConnectAction;
+import com.provectus.kafka.ui.model.rbac.permission.ConsumerGroupAction;
+import com.provectus.kafka.ui.model.rbac.permission.KsqlAction;
+import com.provectus.kafka.ui.model.rbac.permission.SchemaAction;
+import com.provectus.kafka.ui.model.rbac.permission.TopicAction;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import lombok.Value;
+import org.springframework.util.Assert;
+
+@Value
+public class AccessContext {
+
+  String cluster;
+  Collection<ClusterConfigAction> clusterConfigActions;
+
+  String topic;
+  Collection<TopicAction> topicActions;
+
+  String consumerGroup;
+  Collection<ConsumerGroupAction> consumerGroupActions;
+
+  String connect;
+  Collection<ConnectAction> connectActions;
+
+  String connector;
+
+  String schema;
+  Collection<SchemaAction> schemaActions;
+
+  Collection<KsqlAction> ksqlActions;
+
+  public static AccessContextBuilder builder() {
+    return new AccessContextBuilder();
+  }
+
+  public static final class AccessContextBuilder {
+    private String cluster;
+    private Collection<ClusterConfigAction> clusterConfigActions = Collections.emptySet();
+    private String topic;
+    private Collection<TopicAction> topicActions = Collections.emptySet();
+    private String consumerGroup;
+    private Collection<ConsumerGroupAction> consumerGroupActions = Collections.emptySet();
+    private String connect;
+    private Collection<ConnectAction> connectActions = Collections.emptySet();
+    private String connector;
+    private String schema;
+    private Collection<SchemaAction> schemaActions = Collections.emptySet();
+    private Collection<KsqlAction> ksqlActions = Collections.emptySet();
+
+    private AccessContextBuilder() {
+    }
+
+    public AccessContextBuilder cluster(String cluster) {
+      this.cluster = cluster;
+      return this;
+    }
+
+    public AccessContextBuilder clusterConfigActions(ClusterConfigAction... actions) {
+      Assert.isTrue(actions.length > 0, "actions not present");
+      this.clusterConfigActions = List.of(actions);
+      return this;
+    }
+
+    public AccessContextBuilder topic(String topic) {
+      this.topic = topic;
+      return this;
+    }
+
+    public AccessContextBuilder topicActions(TopicAction... actions) {
+      Assert.isTrue(actions.length > 0, "actions not present");
+      this.topicActions = List.of(actions);
+      return this;
+    }
+
+    public AccessContextBuilder consumerGroup(String consumerGroup) {
+      this.consumerGroup = consumerGroup;
+      return this;
+    }
+
+    public AccessContextBuilder consumerGroupActions(ConsumerGroupAction... actions) {
+      Assert.isTrue(actions.length > 0, "actions not present");
+      this.consumerGroupActions = List.of(actions);
+      return this;
+    }
+
+    public AccessContextBuilder connect(String connect) {
+      this.connect = connect;
+      return this;
+    }
+
+    public AccessContextBuilder connectActions(ConnectAction... actions) {
+      Assert.isTrue(actions.length > 0, "actions not present");
+      this.connectActions = List.of(actions);
+      return this;
+    }
+
+    public AccessContextBuilder connector(String connector) {
+      this.connector = connector;
+      return this;
+    }
+
+    public AccessContextBuilder schema(String schema) {
+      this.schema = schema;
+      return this;
+    }
+
+    public AccessContextBuilder schemaActions(SchemaAction... actions) {
+      Assert.isTrue(actions.length > 0, "actions not present");
+      this.schemaActions = List.of(actions);
+      return this;
+    }
+
+    public AccessContextBuilder ksqlActions(KsqlAction... actions) {
+      Assert.isTrue(actions.length > 0, "actions not present");
+      this.ksqlActions = List.of(actions);
+      return this;
+    }
+
+    public AccessContext build() {
+      return new AccessContext(cluster, clusterConfigActions,
+          topic, topicActions,
+          consumerGroup, consumerGroupActions,
+          connect, connectActions,
+          connector,
+          schema, schemaActions,
+          ksqlActions);
+    }
+  }
+}
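
AccessContext is assembled through its builder before being checked against the caller's permissions. A hedged usage sketch using only action enums introduced in this change; the cluster and consumer group names are illustrative:

AccessContext context = AccessContext.builder()
    .cluster("local")                    // illustrative cluster name
    .consumerGroup("orders-processing")  // illustrative consumer group
    .consumerGroupActions(ConsumerGroupAction.VIEW, ConsumerGroupAction.RESET_OFFSETS)
    .build();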

+ 72 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/rbac/Permission.java

@@ -0,0 +1,72 @@
+package com.provectus.kafka.ui.model.rbac;
+
+import static com.provectus.kafka.ui.model.rbac.Resource.CLUSTERCONFIG;
+import static com.provectus.kafka.ui.model.rbac.Resource.KSQL;
+
+import com.provectus.kafka.ui.model.rbac.permission.ClusterConfigAction;
+import com.provectus.kafka.ui.model.rbac.permission.ConnectAction;
+import com.provectus.kafka.ui.model.rbac.permission.ConsumerGroupAction;
+import com.provectus.kafka.ui.model.rbac.permission.KsqlAction;
+import com.provectus.kafka.ui.model.rbac.permission.SchemaAction;
+import com.provectus.kafka.ui.model.rbac.permission.TopicAction;
+import java.util.Arrays;
+import java.util.List;
+import java.util.regex.Pattern;
+import lombok.EqualsAndHashCode;
+import lombok.Getter;
+import lombok.ToString;
+import org.apache.commons.collections.CollectionUtils;
+import org.jetbrains.annotations.Nullable;
+import org.springframework.util.Assert;
+
+@Getter
+@ToString
+@EqualsAndHashCode
+public class Permission {
+
+  Resource resource;
+
+  @Nullable
+  Pattern value;
+  List<String> actions;
+
+  @SuppressWarnings("unused")
+  public void setResource(String resource) {
+    this.resource = Resource.fromString(resource.toUpperCase());
+  }
+
+  public void setValue(String value) {
+    this.value = Pattern.compile(value);
+  }
+
+  @SuppressWarnings("unused")
+  public void setActions(List<String> actions) {
+    this.actions = actions;
+  }
+
+  public void validate() {
+    Assert.notNull(resource, "resource cannot be null");
+    if (!List.of(KSQL, CLUSTERCONFIG).contains(this.resource)) {
+      Assert.notNull(value, "permission value can't be empty for resource " + resource);
+    }
+  }
+
+  public void transform() {
+    if (CollectionUtils.isEmpty(actions) || this.actions.stream().noneMatch("ALL"::equalsIgnoreCase)) {
+      return;
+    }
+    this.actions = getActionValues();
+  }
+
+  private List<String> getActionValues() {
+    return switch (this.resource) {
+      case CLUSTERCONFIG -> Arrays.stream(ClusterConfigAction.values()).map(Enum::toString).toList();
+      case TOPIC -> Arrays.stream(TopicAction.values()).map(Enum::toString).toList();
+      case CONSUMER -> Arrays.stream(ConsumerGroupAction.values()).map(Enum::toString).toList();
+      case SCHEMA -> Arrays.stream(SchemaAction.values()).map(Enum::toString).toList();
+      case CONNECT -> Arrays.stream(ConnectAction.values()).map(Enum::toString).toList();
+      case KSQL -> Arrays.stream(KsqlAction.values()).map(Enum::toString).toList();
+    };
+  }
+
+}
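
transform() expands the literal ALL (case-insensitive) into every concrete action for the permission's resource, and validate() requires a value pattern for every resource except KSQL and CLUSTERCONFIG. A hypothetical permission set up programmatically, while in practice these fields are bound from configuration:

Permission permission = new Permission();
permission.setResource("topic");          // resolved to Resource.TOPIC
permission.setValue("orders-.*");         // regex matched against resource names
permission.setActions(List.of("ALL"));    // expanded by transform() to all TopicAction values
permission.transform();
permission.validate();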

+ 21 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/rbac/Resource.java

@@ -0,0 +1,21 @@
+package com.provectus.kafka.ui.model.rbac;
+
+import org.apache.commons.lang3.EnumUtils;
+import org.jetbrains.annotations.Nullable;
+
+public enum Resource {
+
+  CLUSTERCONFIG,
+  TOPIC,
+  CONSUMER,
+  SCHEMA,
+  CONNECT,
+  KSQL;
+
+  @Nullable
+  public static Resource fromString(String name) {
+    return EnumUtils.getEnum(Resource.class, name);
+  }
+
+
+}

+ 19 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/rbac/Role.java

@@ -0,0 +1,19 @@
+package com.provectus.kafka.ui.model.rbac;
+
+import java.util.List;
+import lombok.Data;
+
+@Data
+public class Role {
+
+  String name;
+  List<String> clusters;
+  List<Subject> subjects;
+  List<Permission> permissions;
+
+  public void validate() {
+    permissions.forEach(Permission::transform);
+    permissions.forEach(Permission::validate);
+  }
+
+}

+ 24 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/rbac/Subject.java

@@ -0,0 +1,24 @@
+package com.provectus.kafka.ui.model.rbac;
+
+import com.provectus.kafka.ui.model.rbac.provider.Provider;
+import lombok.Getter;
+
+@Getter
+public class Subject {
+
+  Provider provider;
+  String type;
+  String value;
+
+  public void setProvider(String provider) {
+    this.provider = Provider.fromString(provider.toUpperCase());
+  }
+
+  public void setType(String type) {
+    this.type = type;
+  }
+
+  public void setValue(String value) {
+    this.value = value;
+  }
+}

+ 18 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/rbac/permission/ClusterConfigAction.java

@@ -0,0 +1,18 @@
+package com.provectus.kafka.ui.model.rbac.permission;
+
+import org.apache.commons.lang3.EnumUtils;
+import org.jetbrains.annotations.Nullable;
+
+public enum ClusterConfigAction implements PermissibleAction {
+
+  VIEW,
+  EDIT
+
+  ;
+
+  @Nullable
+  public static ClusterConfigAction fromString(String name) {
+    return EnumUtils.getEnum(ClusterConfigAction.class, name);
+  }
+
+}

+ 19 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/rbac/permission/ConnectAction.java

@@ -0,0 +1,19 @@
+package com.provectus.kafka.ui.model.rbac.permission;
+
+import org.apache.commons.lang3.EnumUtils;
+import org.jetbrains.annotations.Nullable;
+
+public enum ConnectAction implements PermissibleAction {
+
+  VIEW,
+  EDIT,
+  CREATE
+
+  ;
+
+  @Nullable
+  public static ConnectAction fromString(String name) {
+    return EnumUtils.getEnum(ConnectAction.class, name);
+  }
+
+}

+ 20 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/rbac/permission/ConsumerGroupAction.java

@@ -0,0 +1,20 @@
+package com.provectus.kafka.ui.model.rbac.permission;
+
+import org.apache.commons.lang3.EnumUtils;
+import org.jetbrains.annotations.Nullable;
+
+public enum ConsumerGroupAction implements PermissibleAction {
+
+  VIEW,
+  DELETE,
+
+  RESET_OFFSETS
+
+  ;
+
+  @Nullable
+  public static ConsumerGroupAction fromString(String name) {
+    return EnumUtils.getEnum(ConsumerGroupAction.class, name);
+  }
+
+}

+ 15 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/rbac/permission/KsqlAction.java

@@ -0,0 +1,15 @@
+package com.provectus.kafka.ui.model.rbac.permission;
+
+import org.apache.commons.lang3.EnumUtils;
+import org.jetbrains.annotations.Nullable;
+
+public enum KsqlAction implements PermissibleAction {
+
+  EXECUTE;
+
+  @Nullable
+  public static KsqlAction fromString(String name) {
+    return EnumUtils.getEnum(KsqlAction.class, name);
+  }
+
+}

Some files were not shown because too many files changed in this diff