From cc468b2f5d88cda1efeb69cccb87dc255419351e Mon Sep 17 00:00:00 2001 From: Maciej Mensfeld Date: Wed, 18 Mar 2026 12:28:10 +0100 Subject: [PATCH 1/3] switch to minitest --- .../ci_linux_alpine_aarch64_musl.yml | 16 +- ...inux_alpine_aarch64_musl_complementary.yml | 40 +- .../workflows/ci_linux_alpine_x86_64_musl.yml | 16 +- ...linux_alpine_x86_64_musl_complementary.yml | 40 +- .../workflows/ci_linux_debian_x86_64_gnu.yml | 22 +- ..._linux_debian_x86_64_gnu_complementary.yml | 42 +- .../workflows/ci_linux_ubuntu_aarch64_gnu.yml | 22 +- ...linux_ubuntu_aarch64_gnu_complementary.yml | 34 +- .../workflows/ci_linux_ubuntu_x86_64_gnu.yml | 26 +- ..._linux_ubuntu_x86_64_gnu_complementary.yml | 34 +- .github/workflows/ci_macos_arm64.yml | 16 +- .rspec | 4 - Gemfile | 3 +- Rakefile | 7 + rdkafka.gemspec | 1 + .../librdkafka_admin_features_spec.rb | 84 -- .../librdkafka_consumer_features_spec.rb | 85 -- .../librdkafka_producer_features_spec.rb | 84 -- spec/integrations/ssl_stress_spec.rb | 112 -- .../unregistered_scheme_file_spec.rb | 118 -- spec/lib/rdkafka/abstract_handle_spec.rb | 153 --- .../rdkafka/admin/create_acl_handle_spec.rb | 54 - .../rdkafka/admin/create_topic_handle_spec.rb | 53 - .../rdkafka/admin/create_topic_report_spec.rb | 18 - .../rdkafka/admin/delete_acl_handle_spec.rb | 83 -- .../rdkafka/admin/delete_acl_report_spec.rb | 69 - .../rdkafka/admin/delete_topic_handle_spec.rb | 55 - .../rdkafka/admin/delete_topic_report_spec.rb | 18 - .../rdkafka/admin/describe_acl_handle_spec.rb | 83 -- .../rdkafka/admin/describe_acl_report_spec.rb | 69 - .../rdkafka/admin/list_offsets_handle_spec.rb | 52 - .../rdkafka/admin/list_offsets_report_spec.rb | 13 - spec/lib/rdkafka/admin_spec.rb | 1198 ---------------- spec/lib/rdkafka/callbacks_spec.rb | 1 - spec/lib/rdkafka/consumer/headers_spec.rb | 73 - spec/lib/rdkafka/consumer/message_spec.rb | 137 -- spec/lib/rdkafka/consumer/partition_spec.rb | 58 - spec/lib/rdkafka/defaults_spec.rb | 113 -- spec/lib/rdkafka/error_spec.rb | 132 -- spec/lib/rdkafka/metadata_spec.rb | 81 -- spec/lib/rdkafka/native_kafka_spec.rb | 178 --- .../rdkafka/producer/delivery_handle_spec.rb | 60 - .../rdkafka/producer/delivery_report_spec.rb | 27 - .../producer/partitions_count_cache_spec.rb | 413 ------ spec/spec_helper.rb | 76 -- .../librdkafka_admin_features_test.rb | 83 ++ .../librdkafka_consumer_features_test.rb | 84 ++ .../librdkafka_producer_features_test.rb | 83 ++ test/integrations/ssl_stress_test.rb | 126 ++ .../unregistered_scheme_file_test.rb | 117 ++ test/lib/rdkafka/abstract_handle_test.rb | 179 +++ .../rdkafka/admin/create_acl_handle_test.rb | 65 + .../rdkafka/admin/create_acl_report_test.rb | 14 +- .../rdkafka/admin/create_topic_handle_test.rb | 70 + .../rdkafka/admin/create_topic_report_test.rb | 20 + .../rdkafka/admin/delete_acl_handle_test.rb | 88 ++ .../rdkafka/admin/delete_acl_report_test.rb | 64 + .../rdkafka/admin/delete_topic_handle_test.rb | 70 + .../rdkafka/admin/delete_topic_report_test.rb | 20 + .../rdkafka/admin/describe_acl_handle_test.rb | 88 ++ .../rdkafka/admin/describe_acl_report_test.rb | 64 + .../rdkafka/admin/list_offsets_handle_test.rb | 72 + .../rdkafka/admin/list_offsets_report_test.rb | 17 + test/lib/rdkafka/admin_test.rb | 1208 +++++++++++++++++ .../lib/rdkafka/bindings_test.rb | 152 +-- test/lib/rdkafka/callbacks_test.rb | 3 + .../lib/rdkafka/config_test.rb | 130 +- test/lib/rdkafka/consumer/headers_test.rb | 91 ++ test/lib/rdkafka/consumer/message_test.rb | 128 ++ test/lib/rdkafka/consumer/partition_test.rb | 65 + 
.../consumer/topic_partition_list_test.rb | 144 +- .../lib/rdkafka/consumer_test.rb | 791 +++++------ test/lib/rdkafka/defaults_test.rb | 116 ++ test/lib/rdkafka/error_test.rb | 136 ++ test/lib/rdkafka/metadata_test.rb | 80 ++ test/lib/rdkafka/native_kafka_test.rb | 187 +++ .../rdkafka/producer/delivery_handle_test.rb | 77 ++ .../rdkafka/producer/delivery_report_test.rb | 30 + .../producer/partitions_count_cache_test.rb | 415 ++++++ .../lib/rdkafka/producer_test.rb | 694 +++++----- .../support/kafka_config_helpers.rb | 0 {spec => test}/support/kafka_wait_helpers.rb | 0 .../support/native_client_helpers.rb | 0 {spec => test}/support/test_topics.rb | 0 test/test_helper.rb | 126 ++ 85 files changed, 5109 insertions(+), 4861 deletions(-) delete mode 100644 .rspec delete mode 100644 spec/integrations/librdkafka_admin_features_spec.rb delete mode 100644 spec/integrations/librdkafka_consumer_features_spec.rb delete mode 100644 spec/integrations/librdkafka_producer_features_spec.rb delete mode 100644 spec/integrations/ssl_stress_spec.rb delete mode 100644 spec/integrations/unregistered_scheme_file_spec.rb delete mode 100644 spec/lib/rdkafka/abstract_handle_spec.rb delete mode 100644 spec/lib/rdkafka/admin/create_acl_handle_spec.rb delete mode 100644 spec/lib/rdkafka/admin/create_topic_handle_spec.rb delete mode 100644 spec/lib/rdkafka/admin/create_topic_report_spec.rb delete mode 100644 spec/lib/rdkafka/admin/delete_acl_handle_spec.rb delete mode 100644 spec/lib/rdkafka/admin/delete_acl_report_spec.rb delete mode 100644 spec/lib/rdkafka/admin/delete_topic_handle_spec.rb delete mode 100644 spec/lib/rdkafka/admin/delete_topic_report_spec.rb delete mode 100644 spec/lib/rdkafka/admin/describe_acl_handle_spec.rb delete mode 100644 spec/lib/rdkafka/admin/describe_acl_report_spec.rb delete mode 100644 spec/lib/rdkafka/admin/list_offsets_handle_spec.rb delete mode 100644 spec/lib/rdkafka/admin/list_offsets_report_spec.rb delete mode 100644 spec/lib/rdkafka/admin_spec.rb delete mode 100644 spec/lib/rdkafka/callbacks_spec.rb delete mode 100644 spec/lib/rdkafka/consumer/headers_spec.rb delete mode 100644 spec/lib/rdkafka/consumer/message_spec.rb delete mode 100644 spec/lib/rdkafka/consumer/partition_spec.rb delete mode 100644 spec/lib/rdkafka/defaults_spec.rb delete mode 100644 spec/lib/rdkafka/error_spec.rb delete mode 100644 spec/lib/rdkafka/metadata_spec.rb delete mode 100644 spec/lib/rdkafka/native_kafka_spec.rb delete mode 100644 spec/lib/rdkafka/producer/delivery_handle_spec.rb delete mode 100644 spec/lib/rdkafka/producer/delivery_report_spec.rb delete mode 100644 spec/lib/rdkafka/producer/partitions_count_cache_spec.rb delete mode 100644 spec/spec_helper.rb create mode 100644 test/integrations/librdkafka_admin_features_test.rb create mode 100644 test/integrations/librdkafka_consumer_features_test.rb create mode 100644 test/integrations/librdkafka_producer_features_test.rb create mode 100644 test/integrations/ssl_stress_test.rb create mode 100644 test/integrations/unregistered_scheme_file_test.rb create mode 100644 test/lib/rdkafka/abstract_handle_test.rb create mode 100644 test/lib/rdkafka/admin/create_acl_handle_test.rb rename spec/lib/rdkafka/admin/create_acl_report_spec.rb => test/lib/rdkafka/admin/create_acl_report_test.rb (52%) create mode 100644 test/lib/rdkafka/admin/create_topic_handle_test.rb create mode 100644 test/lib/rdkafka/admin/create_topic_report_test.rb create mode 100644 test/lib/rdkafka/admin/delete_acl_handle_test.rb create mode 100644 
test/lib/rdkafka/admin/delete_acl_report_test.rb create mode 100644 test/lib/rdkafka/admin/delete_topic_handle_test.rb create mode 100644 test/lib/rdkafka/admin/delete_topic_report_test.rb create mode 100644 test/lib/rdkafka/admin/describe_acl_handle_test.rb create mode 100644 test/lib/rdkafka/admin/describe_acl_report_test.rb create mode 100644 test/lib/rdkafka/admin/list_offsets_handle_test.rb create mode 100644 test/lib/rdkafka/admin/list_offsets_report_test.rb create mode 100644 test/lib/rdkafka/admin_test.rb rename spec/lib/rdkafka/bindings_spec.rb => test/lib/rdkafka/bindings_test.rb (53%) create mode 100644 test/lib/rdkafka/callbacks_test.rb rename spec/lib/rdkafka/config_spec.rb => test/lib/rdkafka/config_test.rb (60%) create mode 100644 test/lib/rdkafka/consumer/headers_test.rb create mode 100644 test/lib/rdkafka/consumer/message_test.rb create mode 100644 test/lib/rdkafka/consumer/partition_test.rb rename spec/lib/rdkafka/consumer/topic_partition_list_spec.rb => test/lib/rdkafka/consumer/topic_partition_list_test.rb (62%) rename spec/lib/rdkafka/consumer_spec.rb => test/lib/rdkafka/consumer_test.rb (66%) create mode 100644 test/lib/rdkafka/defaults_test.rb create mode 100644 test/lib/rdkafka/error_test.rb create mode 100644 test/lib/rdkafka/metadata_test.rb create mode 100644 test/lib/rdkafka/native_kafka_test.rb create mode 100644 test/lib/rdkafka/producer/delivery_handle_test.rb create mode 100644 test/lib/rdkafka/producer/delivery_report_test.rb create mode 100644 test/lib/rdkafka/producer/partitions_count_cache_test.rb rename spec/lib/rdkafka/producer_spec.rb => test/lib/rdkafka/producer_test.rb (66%) rename {spec => test}/support/kafka_config_helpers.rb (100%) rename {spec => test}/support/kafka_wait_helpers.rb (100%) rename {spec => test}/support/native_client_helpers.rb (100%) rename {spec => test}/support/test_topics.rb (100%) create mode 100644 test/test_helper.rb diff --git a/.github/workflows/ci_linux_alpine_aarch64_musl.yml b/.github/workflows/ci_linux_alpine_aarch64_musl.yml index 31023924..394ee029 100644 --- a/.github/workflows/ci_linux_alpine_aarch64_musl.yml +++ b/.github/workflows/ci_linux_alpine_aarch64_musl.yml @@ -41,7 +41,7 @@ env: BUNDLE_JOBS: 4 jobs: - specs_install: + tests_install: timeout-minutes: 60 runs-on: ubuntu-24.04-arm strategy: @@ -80,7 +80,7 @@ jobs: sleep 2 done - - name: Run all specs in PLAINTEXT + - name: Run all tests in PLAINTEXT env: GITHUB_COVERAGE: ${{ matrix.coverage }} run: | @@ -101,7 +101,7 @@ jobs: bundle install && \ cd ext && bundle exec rake && \ cd .. 
&& \ - bundle exec ruby -S rspec' + bundle exec rake test' - name: Verify Kafka warnings run: bin/verify_kafka_warnings @@ -139,7 +139,7 @@ jobs: path: ext/ retention-days: 1 - specs_precompiled: + tests_precompiled: timeout-minutes: 45 runs-on: ubuntu-24.04-arm needs: build_precompiled @@ -178,7 +178,7 @@ sleep 2 done - - name: Run specs with precompiled library and PLAINTEXT + - name: Run tests with precompiled library and PLAINTEXT env: GITHUB_COVERAGE: ${{ matrix.coverage }} RDKAFKA_EXT_PATH: ${{ github.workspace }}/ext @@ -195,7 +195,7 @@ sh -c 'apk add --no-cache git build-base linux-headers bash && \ git config --global --add safe.directory /workspace && \ bundle config set --local path vendor/bundle && \ - bundle install && bundle exec ruby -S rspec' + bundle install && bundle exec rake test' - name: Verify Kafka warnings run: bin/verify_kafka_warnings @@ -205,9 +205,9 @@ runs-on: ubuntu-24.04-arm if: always() needs: - - specs_install + - tests_install - build_precompiled - - specs_precompiled + - tests_precompiled steps: - name: Check all jobs passed if: | diff --git a/.github/workflows/ci_linux_alpine_aarch64_musl_complementary.yml b/.github/workflows/ci_linux_alpine_aarch64_musl_complementary.yml index 6bca1dde..dc0eb559 100644 --- a/.github/workflows/ci_linux_alpine_aarch64_musl_complementary.yml +++ b/.github/workflows/ci_linux_alpine_aarch64_musl_complementary.yml @@ -5,20 +5,20 @@ # on ARM64 architecture. # # WHY COMPLEMENTARY TESTING FOR ALPINE/MUSL ARM64: -# - SSL specs have been stable for 3+ years and rarely break due to code changes -# - Integration specs test musl libc and Alpine package compatibility on ARM64 +# - SSL tests have been stable for 3+ years and rarely break due to code changes +# - Integration tests exercise musl libc and Alpine package compatibility on ARM64 # - These tests catch regressions from external changes (OpenSSL updates, Alpine package updates) # - Running every 3 days to prevent these slower tests from blocking PR velocity # - Manual triggering allows testing workflow changes before they go into schedule # -# SSL TESTING (specs_install + specs_precompiled): +# SSL TESTING (tests_install + tests_precompiled): # - Tests SSL/TLS connectivity with Kafka using docker-compose-ssl.yml on Alpine ARM64 # - Validates certificate handling and SSL handshakes across Ruby versions on musl ARM64 # - Ensures SSL works with both compiled-from-source and precompiled flows on Alpine ARM64 # - Catches OpenSSL version compatibility issues and SSL library regressions on musl ARM64 # - Tests real SSL scenarios that mirror Alpine ARM64-based production deployments # -# INTEGRATION TESTING (integration specs in both jobs): +# INTEGRATION TESTING (integration tests in both jobs): # - Tests musl libc and Alpine system library compatibility on ARM64 without requiring Kafka infrastructure # - Validates libssl, libsasl2, libzstd, zlib integration across Alpine versions on ARM64 # - Ensures native extensions work with different Alpine package versions on ARM64 @@ -47,7 +47,7 @@ on: pull_request: paths: - '.github/workflows/ci_linux_alpine_aarch64_musl_complementary.yml' - - 'spec/integrations/**' + - 'test/integrations/**' branches: [ master ] push: branches: @@ -96,7 +96,7 @@ jobs: path: ext/ retention-days: 1 - specs_install: + tests_install: timeout-minutes: 60 runs-on: ubuntu-24.04-arm strategy: @@ -128,7 +128,7 @@ sleep 2 done - - name: Run all specs in SSL (compiled flow) + - name: Run all tests in SSL (compiled flow) env: KAFKA_SSL_ENABLED: "true"
run: | @@ -152,13 +152,13 @@ jobs: echo "=== SSL Library Versions ===" && \ openssl version && \ apk list --installed | grep -E "(openssl|cyrus-sasl)" && \ - echo "=== Running SSL Specs (Compiled) ===" && \ - bundle exec ruby -S rspec' + echo "=== Running SSL Tests (Compiled) ===" && \ + bundle exec rake test' - name: Verify Kafka warnings run: bin/verify_kafka_warnings - - name: Run integration specs (compiled flow) + - name: Run integration tests (compiled flow) run: | docker run --rm --platform linux/arm64 \ -v "${{ github.workspace }}:/workspace" \ @@ -178,13 +178,13 @@ jobs: echo "=== Alpine/musl Library Versions ===" && \ openssl version && \ apk list --installed | grep -E "(openssl|cyrus-sasl|zstd|zlib)" && \ - echo "=== Running Integration Specs (Compiled) ===" && \ - for file in $(ls spec/integrations/*_spec.rb); do \ + echo "=== Running Integration Tests (Compiled) ===" && \ + for file in $(ls test/integrations/*_test.rb); do \ echo "Running $file with Ruby ${{ matrix.ruby }} on Alpine ${{ matrix.alpine_version }} ARM64"; \ bundle exec ruby "$file" || exit 1; \ done' - specs_precompiled: + tests_precompiled: timeout-minutes: 45 needs: build_precompiled strategy: @@ -221,7 +221,7 @@ jobs: sleep 2 done - - name: Run specs with precompiled library and SSL + - name: Run tests with precompiled library and SSL env: RDKAFKA_EXT_PATH: ${{ github.workspace }}/ext KAFKA_SSL_ENABLED: "true" @@ -239,12 +239,12 @@ jobs: git config --global --add safe.directory /workspace && \ bundle config set --local path vendor/bundle && bundle install && \ apk list --installed | grep -E "(openssl|cyrus-sasl)" && \ - bundle exec ruby -S rspec' + bundle exec rake test' - name: Verify Kafka warnings run: bin/verify_kafka_warnings - - name: Run integration specs (precompiled flow) + - name: Run integration tests (precompiled flow) env: RDKAFKA_EXT_PATH: ${{ github.workspace }}/ext RDKAFKA_PRECOMPILED: "true" @@ -265,8 +265,8 @@ jobs: bundle config set --local path vendor/bundle && \ bundle install && \ apk list --installed | grep -E "(openssl|cyrus-sasl|zstd|zlib)" && \ - echo "=== Running Integration Specs (Precompiled) ===" && \ - for file in $(ls spec/integrations/*_spec.rb); do \ + echo "=== Running Integration Tests (Precompiled) ===" && \ + for file in $(ls test/integrations/*_test.rb); do \ echo "Running $file with Ruby ${{ matrix.ruby }} (precompiled ARM64)"; \ bundle exec ruby "$file" || exit 1; \ done' @@ -276,9 +276,9 @@ jobs: runs-on: ubuntu-24.04-arm if: always() needs: - - specs_install + - tests_install - build_precompiled - - specs_precompiled + - tests_precompiled steps: - name: Check all jobs passed if: | diff --git a/.github/workflows/ci_linux_alpine_x86_64_musl.yml b/.github/workflows/ci_linux_alpine_x86_64_musl.yml index 3cfef48e..a4662eff 100644 --- a/.github/workflows/ci_linux_alpine_x86_64_musl.yml +++ b/.github/workflows/ci_linux_alpine_x86_64_musl.yml @@ -41,7 +41,7 @@ env: BUNDLE_JOBS: 4 jobs: - specs_install: + tests_install: timeout-minutes: 45 runs-on: ubuntu-latest strategy: @@ -80,7 +80,7 @@ jobs: sleep 2 done - - name: Run all specs in PLAINTEXT + - name: Run all tests in PLAINTEXT env: GITHUB_COVERAGE: ${{ matrix.coverage }} run: | @@ -101,7 +101,7 @@ jobs: bundle install && \ cd ext && bundle exec rake && \ cd .. 
&& \ - bundle exec ruby -S rspec' + bundle exec rake test' - name: Verify Kafka warnings run: bin/verify_kafka_warnings @@ -141,7 +141,7 @@ path: ext/ retention-days: 1 - specs_precompiled: + tests_precompiled: timeout-minutes: 30 runs-on: ubuntu-latest needs: build_precompiled @@ -177,7 +177,7 @@ fi sleep 2 done - - name: Run specs with precompiled library and PLAINTEXT + - name: Run tests with precompiled library and PLAINTEXT env: GITHUB_COVERAGE: ${{ matrix.coverage }} RDKAFKA_EXT_PATH: ${{ github.workspace }}/ext @@ -194,7 +194,7 @@ sh -c 'apk add --no-cache git build-base linux-headers bash && \ git config --global --add safe.directory /workspace && \ bundle config set --local path vendor/bundle && \ - bundle install && bundle exec ruby -S rspec' + bundle install && bundle exec rake test' - name: Verify Kafka warnings run: bin/verify_kafka_warnings @@ -204,9 +204,9 @@ runs-on: ubuntu-latest if: always() needs: - - specs_install + - tests_install - build_precompiled - - specs_precompiled + - tests_precompiled steps: - name: Check all jobs passed if: | diff --git a/.github/workflows/ci_linux_alpine_x86_64_musl_complementary.yml b/.github/workflows/ci_linux_alpine_x86_64_musl_complementary.yml index 68a5e8ac..a3cfad4a 100644 --- a/.github/workflows/ci_linux_alpine_x86_64_musl_complementary.yml +++ b/.github/workflows/ci_linux_alpine_x86_64_musl_complementary.yml @@ -6,20 +6,20 @@ # schedule and when workflow changes are made. # # WHY COMPLEMENTARY TESTING FOR ALPINE/MUSL: -# - SSL specs have been stable for 3+ years and rarely break due to code changes -# - Integration specs test musl libc and Alpine package compatibility +# - SSL tests have been stable for 3+ years and rarely break due to code changes +# - Integration tests exercise musl libc and Alpine package compatibility # - These tests catch regressions from external changes (OpenSSL updates, Alpine package updates) # - Running every 3 days to prevent these slower tests from blocking PR velocity # - Manual triggering allows testing workflow changes before they go into schedule # -# SSL TESTING (specs_install + specs_precompiled): +# SSL TESTING (tests_install + tests_precompiled): # - Tests SSL/TLS connectivity with Kafka using docker-compose-ssl.yml on Alpine # - Validates certificate handling and SSL handshakes across Ruby versions on musl # - Ensures SSL works with both compiled-from-source and precompiled flows on Alpine # - Catches OpenSSL version compatibility issues and SSL library regressions on musl # - Tests real SSL scenarios that mirror Alpine-based production deployments # -# INTEGRATION TESTING (integration specs in both jobs): +# INTEGRATION TESTING (integration tests in both jobs): # - Tests musl libc and Alpine system library compatibility without requiring Kafka infrastructure # - Validates libssl, libsasl2, libzstd, zlib integration across Alpine versions # - Ensures native extensions work with different Alpine package versions @@ -48,7 +48,7 @@ on: pull_request: paths: - '.github/workflows/ci_linux_alpine_x86_64_musl_complementary.yml' - - 'spec/integrations/**' + - 'test/integrations/**' branches: [ master ] push: branches: @@ -99,7 +99,7 @@ jobs: path: ext/ retention-days: 1 - specs_install: + tests_install: timeout-minutes: 45 runs-on: ubuntu-latest strategy: @@ -131,7 +131,7 @@ sleep 2 done - - name: Run all specs in SSL (compiled flow) + - name: Run all tests in SSL (compiled flow) env: KAFKA_SSL_ENABLED: "true" run: | @@ -155,13 +155,13 @@ echo "=== SSL Library Versions
===" && \ openssl version && \ apk list --installed | grep -E "(openssl|cyrus-sasl)" && \ - echo "=== Running SSL Specs (Compiled) ===" && \ - bundle exec ruby -S rspec' + echo "=== Running SSL Tests (Compiled) ===" && \ + bundle exec rake test' - name: Verify Kafka warnings run: bin/verify_kafka_warnings - - name: Run integration specs (compiled flow) + - name: Run integration tests (compiled flow) run: | docker run --rm \ -v "${{ github.workspace }}:/workspace" \ @@ -181,13 +181,13 @@ jobs: echo "=== Alpine/musl Library Versions ===" && \ openssl version && \ apk list --installed | grep -E "(openssl|cyrus-sasl|zstd|zlib)" && \ - echo "=== Running Integration Specs (Compiled) ===" && \ - for file in $(ls spec/integrations/*_spec.rb); do \ + echo "=== Running Integration Tests (Compiled) ===" && \ + for file in $(ls test/integrations/*_test.rb); do \ echo "Running $file with Ruby ${{ matrix.ruby }} on Alpine ${{ matrix.alpine_version }}"; \ bundle exec ruby "$file" || exit 1; \ done' - specs_precompiled: + tests_precompiled: timeout-minutes: 30 needs: build_precompiled strategy: @@ -222,7 +222,7 @@ jobs: sleep 2 done - - name: Run specs with precompiled library and SSL + - name: Run tests with precompiled library and SSL env: RDKAFKA_EXT_PATH: ${{ github.workspace }}/ext KAFKA_SSL_ENABLED: "true" @@ -240,12 +240,12 @@ jobs: git config --global --add safe.directory /workspace && \ bundle config set --local path vendor/bundle && bundle install && \ apk list --installed | grep -E "(openssl|cyrus-sasl)" && \ - bundle exec ruby -S rspec' + bundle exec rake test' - name: Verify Kafka warnings run: bin/verify_kafka_warnings - - name: Run integration specs (precompiled flow) + - name: Run integration tests (precompiled flow) env: RDKAFKA_EXT_PATH: ${{ github.workspace }}/ext RDKAFKA_PRECOMPILED: "true" @@ -266,8 +266,8 @@ jobs: bundle config set --local path vendor/bundle && \ bundle install && \ apk list --installed | grep -E "(openssl|cyrus-sasl|zstd|zlib)" && \ - echo "=== Running Integration Specs (Precompiled) ===" && \ - for file in $(ls spec/integrations/*_spec.rb); do \ + echo "=== Running Integration Tests (Precompiled) ===" && \ + for file in $(ls test/integrations/*_test.rb); do \ echo "Running $file with Ruby ${{ matrix.ruby }} (precompiled)"; \ bundle exec ruby "$file" || exit 1; \ done' @@ -277,9 +277,9 @@ jobs: runs-on: ubuntu-latest if: always() needs: - - specs_install + - tests_install - build_precompiled - - specs_precompiled + - tests_precompiled steps: - name: Check all jobs passed if: | diff --git a/.github/workflows/ci_linux_debian_x86_64_gnu.yml b/.github/workflows/ci_linux_debian_x86_64_gnu.yml index 99decf2e..7d05ad23 100644 --- a/.github/workflows/ci_linux_debian_x86_64_gnu.yml +++ b/.github/workflows/ci_linux_debian_x86_64_gnu.yml @@ -12,13 +12,13 @@ # - Different Ruby versions # - Forward compatibility testing with Trixie (Debian testing/future stable) # -# COMPILATION FLOW (build_install + specs_install): +# COMPILATION FLOW (build_install + tests_install): # - Tests that librdkafka compiles correctly from source on both Debian versions # - Validates that mini_portile2 can successfully build native dependencies # - Ensures Ruby native extensions link properly with system libraries # - Verifies that the same codebase works across different toolchain versions # -# PRECOMPILED FLOW (build_precompiled + specs_precompiled): +# PRECOMPILED FLOW (build_precompiled + tests_precompiled): # - Tests our precompiled static libraries work on both Debian versions # - Validates that 
statically-linked binaries are truly portable across environments # - Ensures precompiled libraries don't have unexpected system dependencies @@ -53,7 +53,7 @@ env: BUNDLE_JOBS: 4 jobs: - specs_install: + tests_install: timeout-minutes: 50 strategy: fail-fast: false @@ -121,7 +121,7 @@ jobs: bundle install && \ cd ext && bundle exec rake' - - name: Run all specs in PLAINTEXT + - name: Run all tests in PLAINTEXT env: GITHUB_COVERAGE: ${{matrix.coverage}} RDKAFKA_EXT_PATH: ${{ github.workspace }}/ext @@ -140,7 +140,7 @@ jobs: git config --global --add safe.directory /workspace && \ bundle config set --local path vendor/bundle && \ bundle install && \ - bundle exec rspec' + bundle exec rake test' - name: Verify Kafka warnings run: bin/verify_kafka_warnings @@ -153,7 +153,7 @@ jobs: debian: - 'bookworm' - 'trixie' - # We precompile on older Ubuntu and check compatibility by running specs since we aim to + # We precompile on older Ubuntu and check compatibility by running tests since we aim to # release only one precompiled version for all supported Ubuntu versions # This is why we do not want Renovate to update it automatically runs-on: ubuntu-22.04 # renovate: ignore @@ -206,7 +206,7 @@ jobs: path: ext/ retention-days: 1 - specs_precompiled: + tests_precompiled: timeout-minutes: 50 needs: build_precompiled strategy: @@ -245,7 +245,7 @@ jobs: fi sleep 2 done - - name: Run specs with precompiled library and PLAINTEXT + - name: Run tests with precompiled library and PLAINTEXT env: GITHUB_COVERAGE: ${{ matrix.coverage }} RDKAFKA_EXT_PATH: ${{ github.workspace }}/ext @@ -263,7 +263,7 @@ jobs: apt-get install -y git && \ git config --global --add safe.directory /workspace && \ bundle config set --local path vendor/bundle && \ - bundle install && bundle exec rspec' + bundle install && bundle exec rake test' - name: Verify Kafka warnings run: bin/verify_kafka_warnings @@ -273,9 +273,9 @@ jobs: runs-on: ubuntu-latest if: always() needs: - - specs_install + - tests_install - build_precompiled - - specs_precompiled + - tests_precompiled steps: - name: Check all jobs passed if: | diff --git a/.github/workflows/ci_linux_debian_x86_64_gnu_complementary.yml b/.github/workflows/ci_linux_debian_x86_64_gnu_complementary.yml index 566b58d8..e987b4bf 100644 --- a/.github/workflows/ci_linux_debian_x86_64_gnu_complementary.yml +++ b/.github/workflows/ci_linux_debian_x86_64_gnu_complementary.yml @@ -6,8 +6,8 @@ # schedule and when workflow changes are made. 
# # WHY COMPLEMENTARY TESTING: -# - SSL specs have been stable for 3+ years and rarely break due to code changes -# - Integration specs test OS library compatibility, which changes with system updates +# - SSL tests have been stable for 3+ years and rarely break due to code changes +# - Integration tests exercise OS library compatibility, which changes with system updates # - These tests catch regressions from external changes (OpenSSL updates, system libs) # - Running every 3 days to prevent these slower tests from blocking PR velocity # - Manual triggering allows testing workflow changes before they go into schedule @@ -18,14 +18,14 @@ # - Forward compatibility testing with Trixie (Debian testing/future stable) # - Catching SSL regressions from package updates in both stable and testing # -# SSL TESTING (specs_install + specs_precompiled): +# SSL TESTING (tests_install + tests_precompiled): # - Tests SSL/TLS connectivity with Kafka using docker-compose-ssl.yml # - Validates certificate handling and SSL handshakes across Ruby and Debian versions # - Ensures SSL works with both compiled-from-source and precompiled flows # - Catches OpenSSL version compatibility issues and SSL library regressions # - Tests real SSL scenarios that mirror production deployments # -# INTEGRATION TESTING (integration specs in both jobs): +# INTEGRATION TESTING (integration tests in both jobs): # - Tests system library compatibility without requiring Kafka infrastructure # - Validates libssl, libsasl2, libzstd, zlib integration across OS versions # - Ensures native extensions work with different system library versions @@ -54,7 +54,7 @@ on: pull_request: paths: - '.github/workflows/ci_linux_debian_x86_64_gnu_complementary.yml' - - 'spec/integrations/**' + - 'test/integrations/**' branches: [ master ] push: branches: @@ -128,7 +128,7 @@ path: ext/ retention-days: 1 - specs_install: + tests_install: timeout-minutes: 50 strategy: fail-fast: false @@ -194,7 +194,7 @@ bundle install && \ cd ext && bundle exec rake' - - name: Run all specs in SSL (compiled flow) + - name: Run all tests in SSL (compiled flow) env: RDKAFKA_EXT_PATH: ${{ github.workspace }}/ext KAFKA_SSL_ENABLED: "true" run: | @@ -216,13 +216,13 @@ echo "=== SSL Library Versions (${{ matrix.debian }}) ===" && \ openssl version && \ dpkg -l | grep -E "(libssl|openssl)" && \ - echo "=== Running SSL Specs (Compiled) ===" && \ - bundle exec rspec' + echo "=== Running SSL Tests (Compiled) ===" && \ + bundle exec rake test' - name: Verify Kafka warnings run: bin/verify_kafka_warnings - - name: Run integration specs (compiled flow) + - name: Run integration tests (compiled flow) run: | docker run --rm \ -v "${{ github.workspace }}:/workspace" \ @@ -238,13 +238,13 @@ echo "=== OS Library Versions (${{ matrix.debian }}) ===" && \ openssl version && \ dpkg -l | grep -E "(libssl|libsasl|libzstd|zlib)" && \ - echo "=== Running Integration Specs (Compiled) ===" && \ - for file in $(ls spec/integrations/*_spec.rb); do \ + echo "=== Running Integration Tests (Compiled) ===" && \ + for file in $(ls test/integrations/*_test.rb); do \ echo "Running $file with Ruby ${{ matrix.ruby }} on ${{ matrix.debian }}"; \ bundle exec ruby "$file" || exit 1; \ done' - specs_precompiled: + tests_precompiled: timeout-minutes: 50 needs: build_precompiled strategy: @@ -282,7 +282,7 @@ sleep 2 done - - name: Run specs with precompiled library and SSL + - name: Run tests with precompiled library and SSL env: RDKAFKA_EXT_PATH: ${{ github.workspace }}/ext KAFKA_SSL_ENABLED:
"true" @@ -304,13 +304,13 @@ jobs: echo "=== SSL Library Versions (${{ matrix.debian }}) ===" && \ openssl version && \ dpkg -l | grep -E "(libssl|openssl)" && \ - echo "=== Running SSL Specs (Precompiled) ===" && \ - bundle exec rspec' + echo "=== Running SSL Tests (Precompiled) ===" && \ + bundle exec rake test' - name: Verify Kafka warnings run: bin/verify_kafka_warnings - - name: Run integration specs (precompiled flow) + - name: Run integration tests (precompiled flow) env: RDKAFKA_EXT_PATH: ${{ github.workspace }}/ext RDKAFKA_PRECOMPILED: "true" @@ -331,8 +331,8 @@ jobs: echo "=== OS Library Versions (${{ matrix.debian }}) ===" && \ openssl version && \ dpkg -l | grep -E "(libssl|libsasl|libzstd|zlib)" && \ - echo "=== Running Integration Specs (Precompiled) ===" && \ - for file in $(ls spec/integrations/*_spec.rb); do \ + echo "=== Running Integration Tests (Precompiled) ===" && \ + for file in $(ls test/integrations/*_test.rb); do \ echo "Running $file with Ruby ${{ matrix.ruby }} on ${{ matrix.debian }} (precompiled)"; \ bundle exec ruby "$file" || exit 1; \ done' @@ -342,9 +342,9 @@ jobs: runs-on: ubuntu-latest if: always() needs: - - specs_install + - tests_install - build_precompiled - - specs_precompiled + - tests_precompiled steps: - name: Check all jobs passed if: | diff --git a/.github/workflows/ci_linux_ubuntu_aarch64_gnu.yml b/.github/workflows/ci_linux_ubuntu_aarch64_gnu.yml index 2b2c68e5..943c2e64 100644 --- a/.github/workflows/ci_linux_ubuntu_aarch64_gnu.yml +++ b/.github/workflows/ci_linux_ubuntu_aarch64_gnu.yml @@ -11,13 +11,13 @@ # - Real-world deployment scenarios where users run on various Ubuntu LTS versions # - Different Ruby versions # -# COMPILATION FLOW (build_install + specs_install): +# COMPILATION FLOW (build_install + tests_install): # - Tests that librdkafka compiles correctly from source on each Ubuntu version # - Validates that mini_portile2 can successfully build native dependencies # - Ensures Ruby native extensions link properly with system libraries # - Verifies that the same codebase works across different toolchain versions # -# PRECOMPILED FLOW (build_precompiled + specs_precompiled): +# PRECOMPILED FLOW (build_precompiled + tests_precompiled): # - Tests our precompiled static libraries work on different Ubuntu versions # - Validates that statically-linked binaries are truly portable across environments # - Ensures precompiled libraries don't have unexpected system dependencies @@ -52,7 +52,7 @@ env: BUNDLE_JOBS: 4 jobs: - specs_install: + tests_install: timeout-minutes: 45 strategy: fail-fast: false @@ -114,20 +114,20 @@ jobs: set -e cd ext && bundle exec rake cd .. 
- - name: Run all specs in PLAINTEXT + - name: Run all tests in PLAINTEXT env: GITHUB_COVERAGE: ${{matrix.coverage}} RDKAFKA_EXT_PATH: ${{ github.workspace }}/ext continue-on-error: ${{ matrix.continue-on-error || false }} run: | - bundle exec rspec + bundle exec rake test - name: Verify Kafka warnings run: bin/verify_kafka_warnings build_precompiled: timeout-minutes: 30 - # We precompile on older Ubuntu and check compatibility by running specs since we aim to + # We precompile on older Ubuntu and check compatibility by running tests since we aim to # release only one precompiled version for all supported Ubuntu versions # This is why we do not want Renovate to update it automatically runs-on: ubuntu-22.04-arm # renovate: ignore @@ -180,7 +180,7 @@ path: ext/ retention-days: 1 - specs_precompiled: + tests_precompiled: timeout-minutes: 30 needs: build_precompiled strategy: @@ -262,13 +262,13 @@ echo "Build dependencies removal completed" echo "Remaining build tools:" which gcc g++ make 2>/dev/null || echo "No build tools found in PATH (good!)" - - name: Run specs with precompiled library and PLAINTEXT + - name: Run tests with precompiled library and PLAINTEXT env: GITHUB_COVERAGE: ${{ matrix.coverage }} RDKAFKA_EXT_PATH: ${{ github.workspace }}/ext RDKAFKA_PRECOMPILED: "true" run: | - bundle exec rspec + bundle exec rake test - name: Verify Kafka warnings run: bin/verify_kafka_warnings @@ -278,9 +278,9 @@ runs-on: ubuntu-latest if: always() needs: - - specs_install + - tests_install - build_precompiled - - specs_precompiled + - tests_precompiled steps: - name: Check all jobs passed if: | diff --git a/.github/workflows/ci_linux_ubuntu_aarch64_gnu_complementary.yml b/.github/workflows/ci_linux_ubuntu_aarch64_gnu_complementary.yml index 85ee1518..2df199b1 100644 --- a/.github/workflows/ci_linux_ubuntu_aarch64_gnu_complementary.yml +++ b/.github/workflows/ci_linux_ubuntu_aarch64_gnu_complementary.yml @@ -8,13 +8,13 @@ # WHY COMPLEMENTARY TESTING FOR AARCH64: # - ARM64 precompiled binary testing is stable (rarely breaks from code changes) # - Edge Ruby versions (4.0-preview, JRuby) need testing but shouldn't block PRs -# - Integration specs test OS library compatibility, which changes with system updates +# - Integration tests exercise OS library compatibility, which changes with system updates # - ARM64 architecture has specific compilation and linking considerations # - These tests catch regressions from external changes (system libs, Ruby updates) # - Running every 3 days to prevent these slower tests from blocking PR velocity # - Manual triggering allows testing workflow changes before they go into schedule # -# PRECOMPILED BINARY TESTING (build_precompiled + specs_precompiled): +# PRECOMPILED BINARY TESTING (build_precompiled + tests_precompiled): # - Tests static library distribution works across Ubuntu versions on ARM64 # - Validates precompiled libraries don't have unexpected system dependencies # - Ensures binary compatibility between different Ubuntu LTS versions on aarch64 @@ -23,7 +23,7 @@ # # EXTENDED COMPATIBILITY TESTING: # - Edge case Ruby versions: 4.0, JRuby-10.0 on ARM64 -# - Integration specs testing system library compatibility +# - Integration tests covering system library compatibility # - Both compilation and precompiled flows for comprehensive coverage # - Cross-platform compatibility validation for aarch64 architecture # @@ -49,7 +49,7 @@ on: pull_request: paths: - '.github/workflows/ci_linux_ubuntu_aarch64_gnu_complementary.yml' - - 'spec/integrations/**'
+ - 'test/integrations/**' branches: [ master ] push: branches: @@ -67,7 +67,7 @@ env: jobs: build_precompiled: timeout-minutes: 30 - # We precompile on older Ubuntu and check compatibility by running specs since we aim to + # We precompile on older Ubuntu and check compatibility by running tests since we aim to # release only one precompiled version for all supported Ubuntu versions # This is why we do not want Renovate to update it automatically runs-on: ubuntu-22.04-arm # renovate: ignore @@ -120,7 +120,7 @@ jobs: path: ext/ retention-days: 1 - specs_install: + tests_install: timeout-minutes: 50 strategy: fail-fast: false @@ -184,26 +184,26 @@ jobs: set -e cd ext && bundle exec rake cd .. - - name: Run all specs in SSL (compiled flow) + - name: Run all tests in SSL (compiled flow) env: RDKAFKA_EXT_PATH: ${{ github.workspace }}/ext KAFKA_SSL_ENABLED: "true" continue-on-error: ${{ matrix.continue-on-error || false }} run: | - bundle exec rspec + bundle exec rake test - name: Verify Kafka warnings run: bin/verify_kafka_warnings - - name: Run integration specs (compiled flow) + - name: Run integration tests (compiled flow) continue-on-error: ${{ matrix.continue-on-error || false }} run: | - for file in $(ls spec/integrations/*_spec.rb); do + for file in $(ls test/integrations/*_test.rb); do echo "Running $file with Ruby ${{ matrix.ruby }}"; bundle exec ruby "$file" || exit 1; done - specs_precompiled: + tests_precompiled: timeout-minutes: 30 needs: build_precompiled strategy: @@ -281,23 +281,23 @@ jobs: echo "Build dependencies removal completed" echo "Remaining build tools:" which gcc g++ make 2>/dev/null || echo "No build tools found in PATH (good!)" - - name: Run specs with precompiled library and SSL + - name: Run tests with precompiled library and SSL env: RDKAFKA_EXT_PATH: ${{ github.workspace }}/ext KAFKA_SSL_ENABLED: "true" RDKAFKA_PRECOMPILED: "true" run: | - bundle exec rspec + bundle exec rake test - name: Verify Kafka warnings run: bin/verify_kafka_warnings - - name: Run integration specs (precompiled flow) + - name: Run integration tests (precompiled flow) env: RDKAFKA_EXT_PATH: ${{ github.workspace }}/ext RDKAFKA_PRECOMPILED: "true" run: | - for file in $(ls spec/integrations/*_spec.rb); do + for file in $(ls test/integrations/*_test.rb); do echo "Running $file with Ruby ${{ matrix.ruby }} (precompiled)"; bundle exec ruby "$file" || exit 1; done @@ -307,9 +307,9 @@ jobs: runs-on: ubuntu-latest if: always() needs: - - specs_install + - tests_install - build_precompiled - - specs_precompiled + - tests_precompiled steps: - name: Check all jobs passed if: | diff --git a/.github/workflows/ci_linux_ubuntu_x86_64_gnu.yml b/.github/workflows/ci_linux_ubuntu_x86_64_gnu.yml index 55acd751..d8b80f72 100644 --- a/.github/workflows/ci_linux_ubuntu_x86_64_gnu.yml +++ b/.github/workflows/ci_linux_ubuntu_x86_64_gnu.yml @@ -11,13 +11,13 @@ # - Real-world deployment scenarios where users run on various Ubuntu LTS versions # - Different Ruby versions # -# COMPILATION FLOW (build_install + specs_install): +# COMPILATION FLOW (build_install + tests_install): # - Tests that librdkafka compiles correctly from source on each Ubuntu version # - Validates that mini_portile2 can successfully build native dependencies # - Ensures Ruby native extensions link properly with system libraries # - Verifies that the same codebase works across different toolchain versions # -# PRECOMPILED FLOW (build_precompiled + specs_precompiled): +# PRECOMPILED FLOW (build_precompiled + tests_precompiled): # - Tests our 
precompiled static libraries work on different Ubuntu versions # - Validates that statically-linked binaries are truly portable across environments # - Ensures precompiled libraries don't have unexpected system dependencies @@ -52,7 +52,7 @@ env: BUNDLE_JOBS: 4 jobs: - specs_install: + tests_install: timeout-minutes: 45 strategy: fail-fast: false @@ -115,29 +115,29 @@ set -e cd ext && bundle exec rake cd .. - - name: Run all specs in PLAINTEXT + - name: Run all tests in PLAINTEXT env: GITHUB_COVERAGE: ${{matrix.coverage}} RDKAFKA_EXT_PATH: ${{ github.workspace }}/ext continue-on-error: ${{ matrix.continue-on-error || false }} run: | - bundle exec rspec + bundle exec rake test - - name: Run all specs in SSL + - name: Run all tests in SSL env: GITHUB_COVERAGE: ${{matrix.coverage}} RDKAFKA_EXT_PATH: ${{ github.workspace }}/ext KAFKA_SSL_ENABLED: "true" continue-on-error: ${{ matrix.continue-on-error || false }} run: | - bundle exec rspec + bundle exec rake test - name: Verify Kafka warnings run: bin/verify_kafka_warnings build_precompiled: timeout-minutes: 30 - # We precompile on older Ubuntu and check compatibility by running specs since we aim to + # We precompile on older Ubuntu and check compatibility by running tests since we aim to # release only one precompiled version for all supported Ubuntu versions # This is why we do not want Renovate to update it automatically runs-on: ubuntu-22.04 # renovate: ignore @@ -190,7 +190,7 @@ path: ext/ retention-days: 1 - specs_precompiled: + tests_precompiled: timeout-minutes: 30 needs: build_precompiled strategy: @@ -272,13 +272,13 @@ echo "Build dependencies removal completed" echo "Remaining build tools:" which gcc g++ make 2>/dev/null || echo "No build tools found in PATH (good!)" - - name: Run specs with precompiled library and PLAINTEXT + - name: Run tests with precompiled library and PLAINTEXT env: GITHUB_COVERAGE: ${{ matrix.coverage }} RDKAFKA_EXT_PATH: ${{ github.workspace }}/ext RDKAFKA_PRECOMPILED: "true" run: | - bundle exec rspec + bundle exec rake test - name: Verify Kafka warnings run: bin/verify_kafka_warnings @@ -288,9 +288,9 @@ runs-on: ubuntu-latest if: always() needs: - - specs_install + - tests_install - build_precompiled - - specs_precompiled + - tests_precompiled steps: - name: Check all jobs passed if: | diff --git a/.github/workflows/ci_linux_ubuntu_x86_64_gnu_complementary.yml b/.github/workflows/ci_linux_ubuntu_x86_64_gnu_complementary.yml index 6203e4f0..9f6274ac 100644 --- a/.github/workflows/ci_linux_ubuntu_x86_64_gnu_complementary.yml +++ b/.github/workflows/ci_linux_ubuntu_x86_64_gnu_complementary.yml @@ -8,12 +8,12 @@ # WHY COMPLEMENTARY TESTING: # - Precompiled binary testing is stable (rarely breaks from code changes) # - Edge Ruby versions (4.0-preview, JRuby) need testing but shouldn't block PRs -# - Integration specs test OS library compatibility, which changes with system updates +# - Integration tests exercise OS library compatibility, which changes with system updates # - These tests catch regressions from external changes (system libs, Ruby updates) # - Running every 3 days to prevent these slower tests from blocking PR velocity # - Manual triggering allows testing workflow changes before they go into schedule # -# PRECOMPILED BINARY TESTING (build_precompiled + specs_precompiled): +# PRECOMPILED BINARY TESTING (build_precompiled + tests_precompiled): # - Tests static library distribution works across Ubuntu versions # - Validates precompiled libraries don't have unexpected system
dependencies # - Ensures binary compatibility between different Ubuntu LTS versions @@ -22,7 +22,7 @@ # # EXTENDED COMPATIBILITY TESTING: # - Edge case Ruby versions: 4.0, JRuby-10.0 -# - Integration specs testing system library compatibility +# - Integration tests covering system library compatibility # - Both compilation and precompiled flows for comprehensive coverage # - Cross-platform compatibility validation # @@ -48,7 +48,7 @@ on: pull_request: paths: - '.github/workflows/ci_linux_ubuntu_x86_64_gnu_complementary.yml' - - 'spec/integrations/**' + - 'test/integrations/**' branches: [ master ] push: branches: @@ -66,7 +66,7 @@ env: jobs: build_precompiled: timeout-minutes: 30 - # We precompile on older Ubuntu and check compatibility by running specs since we aim to + # We precompile on older Ubuntu and check compatibility by running tests since we aim to # release only one precompiled version for all supported Ubuntu versions # This is why we do not want Renovate to update it automatically runs-on: ubuntu-22.04 # renovate: ignore @@ -119,7 +119,7 @@ path: ext/ retention-days: 1 - specs_install: + tests_install: timeout-minutes: 50 strategy: fail-fast: false @@ -183,26 +183,26 @@ set -e cd ext && bundle exec rake cd .. - - name: Run all specs in SSL (compiled flow) + - name: Run all tests in SSL (compiled flow) env: RDKAFKA_EXT_PATH: ${{ github.workspace }}/ext KAFKA_SSL_ENABLED: "true" continue-on-error: ${{ matrix.continue-on-error || false }} run: | - bundle exec rspec + bundle exec rake test - name: Verify Kafka warnings run: bin/verify_kafka_warnings - - name: Run integration specs (compiled flow) + - name: Run integration tests (compiled flow) continue-on-error: ${{ matrix.continue-on-error || false }} run: | - for file in $(ls spec/integrations/*_spec.rb); do + for file in $(ls test/integrations/*_test.rb); do echo "Running $file with Ruby ${{ matrix.ruby }}"; bundle exec ruby "$file" || exit 1; done - specs_precompiled: + tests_precompiled: timeout-minutes: 30 needs: build_precompiled strategy: @@ -280,23 +280,23 @@ echo "Build dependencies removal completed" echo "Remaining build tools:" which gcc g++ make 2>/dev/null || echo "No build tools found in PATH (good!)" - - name: Run specs with precompiled library and SSL + - name: Run tests with precompiled library and SSL env: RDKAFKA_EXT_PATH: ${{ github.workspace }}/ext KAFKA_SSL_ENABLED: "true" RDKAFKA_PRECOMPILED: "true" run: | - bundle exec rspec + bundle exec rake test - name: Verify Kafka warnings run: bin/verify_kafka_warnings - - name: Run integration specs (precompiled flow) + - name: Run integration tests (precompiled flow) env: RDKAFKA_EXT_PATH: ${{ github.workspace }}/ext RDKAFKA_PRECOMPILED: "true" run: | - for file in $(ls spec/integrations/*_spec.rb); do + for file in $(ls test/integrations/*_test.rb); do echo "Running $file with Ruby ${{ matrix.ruby }} (precompiled)"; bundle exec ruby "$file" || exit 1; done @@ -306,9 +306,9 @@ runs-on: ubuntu-latest if: always() needs: - - specs_install + - tests_install - build_precompiled - - specs_precompiled + - tests_precompiled steps: - name: Check all jobs passed if: | diff --git a/.github/workflows/ci_macos_arm64.yml b/.github/workflows/ci_macos_arm64.yml index 5533cfbb..4fdead0e 100644 --- a/.github/workflows/ci_macos_arm64.yml +++ b/.github/workflows/ci_macos_arm64.yml @@ -20,7 +20,7 @@ env: CONFLUENT_VERSION: "8.0.0" jobs: - specs_install: + tests_install: timeout-minutes: 45 strategy: fail-fast: false @@ -127,12 +127,12 @@ set -e cd ext &&
bundle exec rake cd .. - - name: Run all specs + - name: Run all tests env: GITHUB_COVERAGE: ${{matrix.coverage}} RDKAFKA_EXT_PATH: ${{ github.workspace }}/ext run: | - bundle exec rspec + bundle exec rake test build_precompiled: timeout-minutes: 45 @@ -162,7 +162,7 @@ jobs: path: ext/ retention-days: 1 - specs_precompiled: + tests_precompiled: timeout-minutes: 30 needs: build_precompiled strategy: @@ -272,22 +272,22 @@ jobs: run: | bundle install echo "Bundle install completed with precompiled library" - - name: Run specs with precompiled library + - name: Run tests with precompiled library env: GITHUB_COVERAGE: ${{ matrix.coverage }} RDKAFKA_EXT_PATH: ${{ github.workspace }}/ext RDKAFKA_PRECOMPILED: "true" run: | - bundle exec rspec + bundle exec rake test ci-success: name: CI macOS ARM64 Success runs-on: ubuntu-latest if: always() needs: - - specs_install + - tests_install - build_precompiled - - specs_precompiled + - tests_precompiled steps: - name: Check all jobs passed if: | diff --git a/.rspec b/.rspec deleted file mode 100644 index 0ba55b4d..00000000 --- a/.rspec +++ /dev/null @@ -1,4 +0,0 @@ ---require spec_helper ---format documentation ---pattern spec/lib/**/*_spec.rb ---order rand diff --git a/Gemfile b/Gemfile index 77d571ab..a881ad4b 100644 --- a/Gemfile +++ b/Gemfile @@ -7,7 +7,8 @@ gemspec group :development do gem "ostruct" gem "pry" - gem "rspec" + gem "minitest" + gem "mocha" gem "simplecov" gem "warning" end diff --git a/Rakefile b/Rakefile index 3c4cdf9e..a3446159 100644 --- a/Rakefile +++ b/Rakefile @@ -2,6 +2,13 @@ require "bundler/gem_tasks" require "./lib/rdkafka" +require "minitest/test_task" + +Minitest::TestTask.create(:test) do |t| + t.test_globs = ["test/lib/**/*_test.rb"] +end + +task default: :test desc "Generate some message traffic" task :produce_messages do diff --git a/rdkafka.gemspec b/rdkafka.gemspec index 239dcaa2..dc77adf4 100644 --- a/rdkafka.gemspec +++ b/rdkafka.gemspec @@ -18,6 +18,7 @@ Gem::Specification.new do |gem| files = files.reject do |file| next true if file.start_with?(".") next true if file.start_with?("spec/") + next true if file.start_with?("test/") next true if file.start_with?("ext/README.md") false diff --git a/spec/integrations/librdkafka_admin_features_spec.rb b/spec/integrations/librdkafka_admin_features_spec.rb deleted file mode 100644 index 7ff0e6fd..00000000 --- a/spec/integrations/librdkafka_admin_features_spec.rb +++ /dev/null @@ -1,84 +0,0 @@ -# frozen_string_literal: true - -# This integration test verifies that librdkafka admin is compiled with all expected builtin features. -# These features are critical for Karafka and rdkafka-ruby to function properly. 
-# -# Exit codes: -# - 0: All expected features found (test passes) -# - 1: Missing expected features or parsing failed (test fails) - -require "rdkafka" -require "logger" -require "stringio" - -$stdout.sync = true - -# Expected features that should be present in our compiled librdkafka -EXPECTED_BUILTIN_FEATURES = %w[ - gzip - snappy - ssl - sasl - regex - lz4 - sasl_plain - sasl_scram - plugins - zstd - sasl_oauthbearer -].freeze - -# Precompiled builds include GSSAPI (via MIT Kerberos + Cyrus SASL) -PRECOMPILED_FEATURES = (EXPECTED_BUILTIN_FEATURES + %w[sasl_gssapi]).freeze - -captured_output = StringIO.new -logger = Logger.new(captured_output) -logger.level = Logger::DEBUG - -Rdkafka::Config.logger = logger -Rdkafka::Config.ensure_log_thread - -config = Rdkafka::Config.new( - "bootstrap.servers": "localhost:9092", - "client.id": "admin-feature-test", - debug: "all" -) - -admin = config.admin - -# Wait for log messages to be processed -sleep 2 - -admin.close - -# Get all log output -log_content = captured_output.string - -# Find the initialization line that contains builtin.features -feature_line = log_content.lines.find { |line| line.include?("builtin.features") } - -unless feature_line - - exit(1) -end - -# Extract the features list from the line -# Format: "... (builtin.features gzip,snappy,ssl,..., ...)" -match = feature_line.match(/builtin\.features\s+([^,]+(?:,[^,\s]+)*)/i) - -unless match - - exit(1) -end - -features_string = match[1] -actual_features = features_string.split(",").map(&:strip) - -# Verify all expected features are present -expected = (ENV["RDKAFKA_PRECOMPILED"] == "true") ? PRECOMPILED_FEATURES : EXPECTED_BUILTIN_FEATURES -missing_features = expected - actual_features - -if missing_features.any? - - exit(1) -end diff --git a/spec/integrations/librdkafka_consumer_features_spec.rb b/spec/integrations/librdkafka_consumer_features_spec.rb deleted file mode 100644 index 2ff0edcd..00000000 --- a/spec/integrations/librdkafka_consumer_features_spec.rb +++ /dev/null @@ -1,85 +0,0 @@ -# frozen_string_literal: true - -# This integration test verifies that librdkafka consumer is compiled with all expected builtin features. -# These features are critical for Karafka and rdkafka-ruby to function properly. 
-# -# Exit codes: -# - 0: All expected features found (test passes) -# - 1: Missing expected features or parsing failed (test fails) - -require "rdkafka" -require "logger" -require "stringio" - -$stdout.sync = true - -# Expected features that should be present in our compiled librdkafka -EXPECTED_BUILTIN_FEATURES = %w[ - gzip - snappy - ssl - sasl - regex - lz4 - sasl_plain - sasl_scram - plugins - zstd - sasl_oauthbearer -].freeze - -# Precompiled builds include GSSAPI (via MIT Kerberos + Cyrus SASL) -PRECOMPILED_FEATURES = (EXPECTED_BUILTIN_FEATURES + %w[sasl_gssapi]).freeze - -captured_output = StringIO.new -logger = Logger.new(captured_output) -logger.level = Logger::DEBUG - -Rdkafka::Config.logger = logger -Rdkafka::Config.ensure_log_thread - -config = Rdkafka::Config.new( - "bootstrap.servers": "localhost:9092", - "client.id": "consumer-feature-test", - "group.id": "feature-test-group", - debug: "all" -) - -consumer = config.consumer - -# Wait for log messages to be processed -sleep 2 - -consumer.close - -# Get all log output -log_content = captured_output.string - -# Find the initialization line that contains builtin.features -feature_line = log_content.lines.find { |line| line.include?("builtin.features") } - -unless feature_line - - exit(1) -end - -# Extract the features list from the line -# Format: "... (builtin.features gzip,snappy,ssl,..., ...)" -match = feature_line.match(/builtin\.features\s+([^,]+(?:,[^,\s]+)*)/i) - -unless match - - exit(1) -end - -features_string = match[1] -actual_features = features_string.split(",").map(&:strip) - -# Verify all expected features are present -expected = (ENV["RDKAFKA_PRECOMPILED"] == "true") ? PRECOMPILED_FEATURES : EXPECTED_BUILTIN_FEATURES -missing_features = expected - actual_features - -if missing_features.any? - - exit(1) -end diff --git a/spec/integrations/librdkafka_producer_features_spec.rb b/spec/integrations/librdkafka_producer_features_spec.rb deleted file mode 100644 index f21e6306..00000000 --- a/spec/integrations/librdkafka_producer_features_spec.rb +++ /dev/null @@ -1,84 +0,0 @@ -# frozen_string_literal: true - -# This integration test verifies that librdkafka producer is compiled with all expected builtin features. -# These features are critical for Karafka and rdkafka-ruby to function properly. 
-# -# Exit codes: -# - 0: All expected features found (test passes) -# - 1: Missing expected features or parsing failed (test fails) - -require "rdkafka" -require "logger" -require "stringio" - -$stdout.sync = true - -# Expected features that should be present in our compiled librdkafka -EXPECTED_BUILTIN_FEATURES = %w[ - gzip - snappy - ssl - sasl - regex - lz4 - sasl_plain - sasl_scram - plugins - zstd - sasl_oauthbearer -].freeze - -# Precompiled builds include GSSAPI (via MIT Kerberos + Cyrus SASL) -PRECOMPILED_FEATURES = (EXPECTED_BUILTIN_FEATURES + %w[sasl_gssapi]).freeze - -captured_output = StringIO.new -logger = Logger.new(captured_output) -logger.level = Logger::DEBUG - -Rdkafka::Config.logger = logger -Rdkafka::Config.ensure_log_thread - -config = Rdkafka::Config.new( - "bootstrap.servers": "localhost:9092", - "client.id": "producer-feature-test", - debug: "all" -) - -producer = config.producer - -# Wait for log messages to be processed -sleep 2 - -producer.close - -# Get all log output -log_content = captured_output.string - -# Find the initialization line that contains builtin.features -feature_line = log_content.lines.find { |line| line.include?("builtin.features") } - -unless feature_line - - exit(1) -end - -# Extract the features list from the line -# Format: "... (builtin.features gzip,snappy,ssl,..., ...)" -match = feature_line.match(/builtin\.features\s+([^,]+(?:,[^,\s]+)*)/i) - -unless match - - exit(1) -end - -features_string = match[1] -actual_features = features_string.split(",").map(&:strip) - -# Verify all expected features are present -expected = (ENV["RDKAFKA_PRECOMPILED"] == "true") ? PRECOMPILED_FEATURES : EXPECTED_BUILTIN_FEATURES -missing_features = expected - actual_features - -if missing_features.any? - - exit(1) -end diff --git a/spec/integrations/ssl_stress_spec.rb b/spec/integrations/ssl_stress_spec.rb deleted file mode 100644 index 4112ae51..00000000 --- a/spec/integrations/ssl_stress_spec.rb +++ /dev/null @@ -1,112 +0,0 @@ -# ssl_stress_test.rb -# -# This script is designed to stress-test the OpenSSL SSL/TLS layer under high concurrency -# to help detect regressions like the one described in OpenSSL issue #28171: -# https://github.com/openssl/openssl/issues/28171 -# -# Issue summary: -# - OpenSSL 3.0.17 introduced a concurrency-related regression. -# - Multiple threads sharing the same SSL_CTX and making parallel TLS connections -# (often with certificate verification enabled) can cause segmentation faults -# due to race conditions in X509 store handling. -# - Affected users include Python (httpx), Rust (reqwest, native-tls), and C applications. -# -# Script details: -# - Starts 100 SSL servers using self-signed, in-memory certs on sequential localhost ports. -# - Uses `rdkafka-ruby` to spin up 100 consumer threads that continuously create and destroy -# SSL connections to these servers for a given duration. -# - This mimics high TLS connection churn and aims to trigger latent SSL_CTX or X509_STORE -# threading bugs like double-frees, memory corruption, or segmentation faults. -# -# Goal: -# - Catch regressions early by validating that heavy concurrent SSL use does not lead to crashes. -# - Provide a minimal and repeatable reproducer when diagnosing OpenSSL-level SSL instability. 
-# -# In case of a failure, segfault will happen - -require "rdkafka" -require "socket" -require "openssl" - -$stdout.sync = true - -STARTING_PORT = 19093 -NUM_PORTS = 150 -BATCHES = 100 -PORTS = STARTING_PORT...(STARTING_PORT + NUM_PORTS) - -CONFIG = { - "bootstrap.servers": Array.new(NUM_PORTS) { |i| "127.0.0.1:#{19093 + i}" }.join(","), - "security.protocol": "SSL", - "enable.ssl.certificate.verification": false -} - -# Generate in-memory self-signed cert -key = OpenSSL::PKey::RSA.new(2048) - -name = OpenSSL::X509::Name.parse("/CN=127.0.0.1") -cert = OpenSSL::X509::Certificate.new -cert.version = 2 -cert.serial = 1 -cert.subject = name -cert.issuer = name -cert.public_key = key.public_key -cert.not_before = Time.now -cert.not_after = Time.now + 3600 -cert.sign(key, OpenSSL::Digest.new("SHA256")) - -# Start servers on multiple ports -PORTS.map do |port| - Thread.new do - # Prepare SSL context - # We do not use a shared context for the server because the goal is to stress librdkafka layer - # and not the Ruby SSL layer - ssl_context = OpenSSL::SSL::SSLContext.new - ssl_context.cert = cert - ssl_context.key = key - - tcp_server = TCPServer.new("127.0.0.1", port) - ssl_server = OpenSSL::SSL::SSLServer.new(tcp_server, ssl_context) - - loop do - ssl_socket = ssl_server.accept - ssl_socket.close - rescue => e - # Some errors are expected and irrelevant - next if e.message.include?("unexpected eof while reading") - end - end -end - -timeout = 30 -start = Time.now - -# Wait for the servers to be available -# We want to make sure that they are available so we are sure that librdkafka actually hammers -# them -loop do - all_up = PORTS.all? do |port| - TCPSocket.new("127.0.0.1", port).close - true - rescue - false - end - - break if all_up - - raise "Timeout waiting for SSL servers" if Time.now - start > timeout - - sleep 0.1 -end - -start_time = Time.now -duration = 60 * 10 # 10 minutes - it should crash faster than that if SSL vulnerable - -while Time.now - start_time < duration - css = Array.new(BATCHES) { Rdkafka::Config.new(CONFIG) } - csss = css.map(&:consumer) - # This print is needed. No idea why but it increases the chances of segfault - - sleep(1) - csss.each(&:close) -end diff --git a/spec/integrations/unregistered_scheme_file_spec.rb b/spec/integrations/unregistered_scheme_file_spec.rb deleted file mode 100644 index dd1a7c4c..00000000 --- a/spec/integrations/unregistered_scheme_file_spec.rb +++ /dev/null @@ -1,118 +0,0 @@ -# frozen_string_literal: true - -# This integration test verifies that rdkafka properly detects and reports specific SSL -# configuration errors when attempting to connect to an SSL-enabled Kafka broker. -# -# It also ensures that we do not statically link ssl certs into incorrect tmp cert location. -# -# These errors occur when rdkafka's underlying OpenSSL library encounters issues -# with SSL certificate validation, particularly related to file scheme handling -# and missing certificate directories. 
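Because this test asserts on OpenSSL's certificate-lookup failures, it helps to know where OpenSSL expects its trust store. A small diagnostic sketch (not part of the test) that prints the compiled-in defaults of the OpenSSL that Ruby links against; note a statically linked librdkafka carries its own, possibly different, baked-in paths:

    require "openssl"

    puts OpenSSL::OPENSSL_VERSION
    puts OpenSSL::X509::DEFAULT_CERT_FILE     # default CA bundle file
    puts OpenSSL::X509::DEFAULT_CERT_DIR      # default hashed-cert directory
    puts OpenSSL::X509::DEFAULT_CERT_FILE_ENV # env var that overrides the file
    puts OpenSSL::X509::DEFAULT_CERT_DIR_ENV  # env var that overrides the dir

If a build bakes in a temporary path from the build machine instead, certificate loading fails with exactly the "unregistered scheme" / "No such file or directory" errors this test looks for.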
-# -# Exit codes: -# - 0: Target error messages NOT detected after 5 seconds (test fails - errors missing) -# - 1: Target error messages detected (test passes - errors are present as expected) -# - 2: Unexpected exception occurred during test execution - -require "rdkafka" -require "socket" -require "openssl" -require "stringio" -require "logger" - -$stdout.sync = true - -captured_output = StringIO.new -Rdkafka::Config.logger = Logger.new(captured_output) - -# Start a dummy SSL server with self-signed certificate -ssl_server_thread = Thread.new do - # Create TCP server - tcp_server = TCPServer.new("localhost", 9099) - - # Generate self-signed certificate - key = OpenSSL::PKey::RSA.new(2048) - cert = OpenSSL::X509::Certificate.new - cert.version = 2 - cert.serial = 1 - cert.subject = OpenSSL::X509::Name.parse("/DC=org/DC=ruby-test/CN=localhost") - cert.issuer = cert.subject - cert.public_key = key.public_key - cert.not_before = Time.now - cert.not_after = cert.not_before + 365 * 24 * 60 * 60 # 1 year - - # Add extensions - ef = OpenSSL::X509::ExtensionFactory.new - ef.subject_certificate = cert - ef.issuer_certificate = cert - cert.add_extension(ef.create_extension("basicConstraints", "CA:TRUE", true)) - cert.add_extension(ef.create_extension("keyUsage", "keyCertSign, cRLSign", true)) - cert.add_extension(ef.create_extension("subjectKeyIdentifier", "hash", false)) - cert.add_extension(ef.create_extension("authorityKeyIdentifier", "keyid:always", false)) - - cert.sign(key, OpenSSL::Digest.new("SHA256")) - - # Create SSL context - ssl_context = OpenSSL::SSL::SSLContext.new - ssl_context.cert = cert - ssl_context.key = key - - # Wrap TCP server with SSL - ssl_server = OpenSSL::SSL::SSLServer.new(tcp_server, ssl_context) - - loop do - client = ssl_server.accept - client.puts("Invalid Kafka broker") - client.close - rescue - # Ignore SSL server errors - they're expected - end -rescue -end - -# Give the server time to start -sleep 1 - -# Try connecting to the dummy SSL server -config = Rdkafka::Config.new( - "bootstrap.servers": "localhost:9099", - "security.protocol": "SSL", - "client.id": "test-client", - "group.id": "test-group" -) - -begin - consumer = config.consumer - - consumer.subscribe("test-topic") - - # Try to poll for messages - this triggers SSL errors - start_time = Time.now - timeout = 5 - - while Time.now - start_time < timeout - begin - consumer.poll(1000) - rescue - break - end - end - - # Wait for rdkafka to finish logging errors - sleep 2 - - # Check captured logs for target error patterns - captured_output.rewind - captured_output.readlines.each do |line| - exit(1) if line.include?("routines::unregistered scheme") - exit(1) if line.include?("system library::No such file or directory") - end -rescue - exit(2) -ensure - consumer&.close if defined?(consumer) && consumer - ssl_server_thread&.kill -end - -# Exit with 0 if target errors not detected -exit(0) diff --git a/spec/lib/rdkafka/abstract_handle_spec.rb b/spec/lib/rdkafka/abstract_handle_spec.rb deleted file mode 100644 index ae5e2466..00000000 --- a/spec/lib/rdkafka/abstract_handle_spec.rb +++ /dev/null @@ -1,153 +0,0 @@ -# frozen_string_literal: true - -RSpec.describe Rdkafka::AbstractHandle do - let(:handle) do - TestHandle.new.tap do |handle| - handle[:pending] = pending_handle - handle[:response] = response - handle[:result] = result - end - end - - let(:response) { 0 } - let(:result) { -1 } - - context "A subclass that does not implement the required methods" do - class BadTestHandle < Rdkafka::AbstractHandle - layout 
:pending, :bool, - :response, :int - end - - it "raises an exception if operation_name is called" do - expect { - BadTestHandle.new.operation_name - }.to raise_exception(RuntimeError, /Must be implemented by subclass!/) - end - - it "raises an exception if create_result is called" do - expect { - BadTestHandle.new.create_result - }.to raise_exception(RuntimeError, /Must be implemented by subclass!/) - end - end - - class TestHandle < Rdkafka::AbstractHandle - layout :pending, :bool, - :response, :int, - :result, :int - - def operation_name - "test_operation" - end - - def create_result - self[:result] - end - end - - describe ".register and .remove" do - let(:pending_handle) { true } - - it "registers and removes a delivery handle" do - described_class.register(handle) - removed = described_class.remove(handle.to_ptr.address) - expect(removed).to eq handle - expect(Rdkafka::AbstractHandle::REGISTRY).to be_empty - end - end - - describe "#pending?" do - context "when true" do - let(:pending_handle) { true } - - it "is true" do - expect(handle.pending?).to be true - end - end - - context "when not true" do - let(:pending_handle) { false } - - it "is false" do - expect(handle.pending?).to be false - end - end - end - - describe "#wait" do - context "when pending_handle true" do - let(:pending_handle) { true } - - it "waits until the timeout and then raises an error" do - expect { - handle.wait(max_wait_timeout_ms: 100) - }.to raise_error Rdkafka::AbstractHandle::WaitTimeoutError, /test_operation/ - end - end - - context "when pending_handle false" do - let(:pending_handle) { false } - - context "without error" do - let(:result) { 1 } - - it "returns a result" do - wait_result = handle.wait - expect(wait_result).to eq(result) - end - - it "waits without a timeout" do - wait_result = handle.wait(max_wait_timeout_ms: nil) - expect(wait_result).to eq(result) - end - end - - context "with error" do - let(:response) { 20 } - - it "raises an rdkafka error" do - expect { - handle.wait - }.to raise_error Rdkafka::RdkafkaError - end - end - - context "backwards compatibility with max_wait_timeout (seconds)" do - let(:result) { 42 } - - it "works with max_wait_timeout (emits deprecation warning to stderr)" do - # Note: Deprecation warning is emitted but not tested here due to RSpec stderr capture complexity - wait_result = handle.wait(max_wait_timeout: 5) - expect(wait_result).to eq(result) - end - - it "works with max_wait_timeout set to nil (wait forever)" do - wait_result = handle.wait(max_wait_timeout: nil) - expect(wait_result).to eq(result) - end - - it "properly converts seconds to milliseconds" do - # Using a very short timeout to verify conversion - handle[:pending] = true - expect { - handle.wait(max_wait_timeout: 0.1) - }.to raise_error(Rdkafka::AbstractHandle::WaitTimeoutError, /100 ms/) - end - - it "uses new parameter when both are provided" do - # When both parameters provided, max_wait_timeout_ms takes precedence - wait_result = handle.wait(max_wait_timeout: 1, max_wait_timeout_ms: 5000) - expect(wait_result).to eq(result) - end - - it "times out based on max_wait_timeout_ms when both are provided" do - handle[:pending] = true - # max_wait_timeout: 10 would be 10000ms, but max_wait_timeout_ms: 100 should take precedence - expect { - handle.wait(max_wait_timeout: 10, max_wait_timeout_ms: 100) - }.to raise_error(Rdkafka::AbstractHandle::WaitTimeoutError, /100 ms/) - end - end - end - end -end diff --git a/spec/lib/rdkafka/admin/create_acl_handle_spec.rb
b/spec/lib/rdkafka/admin/create_acl_handle_spec.rb deleted file mode 100644 index 73703fb1..00000000 --- a/spec/lib/rdkafka/admin/create_acl_handle_spec.rb +++ /dev/null @@ -1,54 +0,0 @@ -# frozen_string_literal: true - -RSpec.describe Rdkafka::Admin::CreateAclHandle do - # If create acl was successful there is no error object - # the error code is set to RD_KAFKA_RESP_ERR_NO_ERROR - # https://github.com/confluentinc/librdkafka/blob/1f9f245ac409f50f724695c628c7a0d54a763b9a/src/rdkafka_error.c#L169 - let(:handle) do - described_class.new.tap do |handle| - handle[:pending] = pending_handle - handle[:response] = response - # If create acl was successful there is no error object and the error_string is set to "" - # https://github.com/confluentinc/librdkafka/blob/1f9f245ac409f50f724695c628c7a0d54a763b9a/src/rdkafka_error.c#L178 - handle[:response_string] = FFI::MemoryPointer.from_string("") - end - end - - let(:response) { Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR } - - describe "#wait" do - let(:pending_handle) { true } - - it "waits until the timeout and then raises an error" do - expect { - handle.wait(max_wait_timeout_ms: 100) - }.to raise_error Rdkafka::Admin::CreateAclHandle::WaitTimeoutError, /create acl/ - end - - context "when not pending anymore and no error" do - let(:pending_handle) { false } - - it "returns a create acl report" do - report = handle.wait - - expect(report.rdkafka_response_string).to eq("") - end - - it "waits without a timeout" do - report = handle.wait(max_wait_timeout_ms: nil) - - expect(report.rdkafka_response_string).to eq("") - end - end - end - - describe "#raise_error" do - let(:pending_handle) { false } - - it "raises the appropriate error" do - expect { - handle.raise_error - }.to raise_exception(Rdkafka::RdkafkaError, /Success \(no_error\)/) - end - end -end diff --git a/spec/lib/rdkafka/admin/create_topic_handle_spec.rb b/spec/lib/rdkafka/admin/create_topic_handle_spec.rb deleted file mode 100644 index 08f31228..00000000 --- a/spec/lib/rdkafka/admin/create_topic_handle_spec.rb +++ /dev/null @@ -1,53 +0,0 @@ -# frozen_string_literal: true - -RSpec.describe Rdkafka::Admin::CreateTopicHandle do - let(:handle) do - described_class.new.tap do |handle| - handle[:pending] = pending_handle - handle[:response] = response - handle[:error_string] = FFI::Pointer::NULL - handle[:result_name] = FFI::MemoryPointer.from_string(topic_name) - end - end - - let(:response) { 0 } - let(:topic_name) { TestTopics.unique } - - describe "#wait" do - let(:pending_handle) { true } - - it "waits until the timeout and then raises an error" do - expect { - handle.wait(max_wait_timeout_ms: 100) - }.to raise_error Rdkafka::Admin::CreateTopicHandle::WaitTimeoutError, /create topic/ - end - - context "when not pending anymore and no error" do - let(:pending_handle) { false } - - it "returns a create topic report" do - report = handle.wait - - expect(report.error_string).to be_nil - expect(report.result_name).to eq(topic_name) - end - - it "waits without a timeout" do - report = handle.wait(max_wait_timeout_ms: nil) - - expect(report.error_string).to be_nil - expect(report.result_name).to eq(topic_name) - end - end - end - - describe "#raise_error" do - let(:pending_handle) { false } - - it "raises the appropriate error" do - expect { - handle.raise_error - }.to raise_exception(Rdkafka::RdkafkaError, /Success \(no_error\)/) - end - end -end diff --git a/spec/lib/rdkafka/admin/create_topic_report_spec.rb b/spec/lib/rdkafka/admin/create_topic_report_spec.rb deleted file mode 100644 index
898ec2f3..00000000 --- a/spec/lib/rdkafka/admin/create_topic_report_spec.rb +++ /dev/null @@ -1,18 +0,0 @@ -# frozen_string_literal: true - -RSpec.describe Rdkafka::Admin::CreateTopicReport do - let(:report) { - described_class.new( - FFI::MemoryPointer.from_string("error string"), - FFI::MemoryPointer.from_string("result name") - ) - } - - it "gets the error string" do - expect(report.error_string).to eq("error string") - end - - it "gets the result name" do - expect(report.result_name).to eq("result name") - end -end diff --git a/spec/lib/rdkafka/admin/delete_acl_handle_spec.rb b/spec/lib/rdkafka/admin/delete_acl_handle_spec.rb deleted file mode 100644 index f82615f4..00000000 --- a/spec/lib/rdkafka/admin/delete_acl_handle_spec.rb +++ /dev/null @@ -1,83 +0,0 @@ -# frozen_string_literal: true - -RSpec.describe Rdkafka::Admin::DeleteAclHandle do - let(:handle) do - error_buffer = FFI::MemoryPointer.from_string(" " * 256) - delete_acl_ptr = Rdkafka::Bindings.rd_kafka_AclBinding_new( - resource_type, - FFI::MemoryPointer.from_string(resource_name), - resource_pattern_type, - FFI::MemoryPointer.from_string(principal), - FFI::MemoryPointer.from_string(host), - operation, - permission_type, - error_buffer, - 256 - ) - if delete_acl_ptr.null? - raise Rdkafka::Config::ConfigError.new(error_buffer.read_string) - end - pointer_array = [delete_acl_ptr] - delete_acls_array_ptr = FFI::MemoryPointer.new(:pointer) - delete_acls_array_ptr.write_array_of_pointer(pointer_array) - described_class.new.tap do |handle| - handle[:pending] = pending_handle - handle[:response] = response - handle[:response_string] = FFI::MemoryPointer.from_string("") - handle[:matching_acls] = delete_acls_array_ptr - handle[:matching_acls_count] = 1 - end - end - - let(:response) { Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR } - let(:resource_name) { TestTopics.unique } - let(:resource_type) { Rdkafka::Bindings::RD_KAFKA_RESOURCE_TOPIC } - let(:resource_pattern_type) { Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_LITERAL } - let(:principal) { "User:anonymous" } - let(:host) { "*" } - let(:operation) { Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_READ } - let(:permission_type) { Rdkafka::Bindings::RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW } - let(:delete_acl_ptr) { FFI::Pointer::NULL } - - after do - if delete_acl_ptr != FFI::Pointer::NULL - Rdkafka::Bindings.rd_kafka_AclBinding_destroy(delete_acl_ptr) - end - end - - describe "#wait" do - let(:pending_handle) { true } - - it "waits until the timeout and then raises an error" do - expect { - handle.wait(max_wait_timeout_ms: 100) - }.to raise_error Rdkafka::Admin::DeleteAclHandle::WaitTimeoutError, /delete acl/ - end - - context "when not pending anymore and no error" do - let(:pending_handle) { false } - - it "returns a delete acl report" do - report = handle.wait - - expect(report.deleted_acls.length).to eq(1) - end - - it "waits without a timeout" do - report = handle.wait(max_wait_timeout_ms: nil) - - expect(report.deleted_acls[0].matching_acl_resource_name).to eq(resource_name) - end - end - end - - describe "#raise_error" do - let(:pending_handle) { false } - - it "raises the appropriate error" do - expect { - handle.raise_error - }.to raise_exception(Rdkafka::RdkafkaError, /Success \(no_error\)/) - end - end -end diff --git a/spec/lib/rdkafka/admin/delete_acl_report_spec.rb b/spec/lib/rdkafka/admin/delete_acl_report_spec.rb deleted file mode 100644 index 5eb4511d..00000000 --- a/spec/lib/rdkafka/admin/delete_acl_report_spec.rb +++ /dev/null @@ -1,69 +0,0 @@ -# frozen_string_literal:
true - -RSpec.describe Rdkafka::Admin::DeleteAclReport do - let(:report) do - error_buffer = FFI::MemoryPointer.from_string(" " * 256) - delete_acl_ptr = Rdkafka::Bindings.rd_kafka_AclBinding_new( - resource_type, - FFI::MemoryPointer.from_string(resource_name), - resource_pattern_type, - FFI::MemoryPointer.from_string(principal), - FFI::MemoryPointer.from_string(host), - operation, - permission_type, - error_buffer, - 256 - ) - if delete_acl_ptr.null? - raise Rdkafka::Config::ConfigError.new(error_buffer.read_string) - end - pointer_array = [delete_acl_ptr] - delete_acls_array_ptr = FFI::MemoryPointer.new(:pointer) - delete_acls_array_ptr.write_array_of_pointer(pointer_array) - described_class.new(matching_acls: delete_acls_array_ptr, matching_acls_count: 1) - end - - let(:resource_name) { TestTopics.unique } - let(:resource_type) { Rdkafka::Bindings::RD_KAFKA_RESOURCE_TOPIC } - let(:resource_pattern_type) { Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_LITERAL } - let(:principal) { "User:anonymous" } - let(:host) { "*" } - let(:operation) { Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_READ } - let(:permission_type) { Rdkafka::Bindings::RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW } - let(:delete_acl_ptr) { FFI::Pointer::NULL } - - after do - if delete_acl_ptr != FFI::Pointer::NULL - Rdkafka::Bindings.rd_kafka_AclBinding_destroy(delete_acl_ptr) - end - end - - it "gets deleted acl resource type as Rdkafka::Bindings::RD_KAFKA_RESOURCE_TOPIC" do - expect(report.deleted_acls[0].matching_acl_resource_type).to eq(Rdkafka::Bindings::RD_KAFKA_RESOURCE_TOPIC) - end - - it "gets deleted acl resource name" do - expect(report.deleted_acls[0].matching_acl_resource_name).to eq(resource_name) - end - - it "gets deleted acl resource pattern type as Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_LITERAL" do - expect(report.deleted_acls[0].matching_acl_resource_pattern_type).to eq(Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_LITERAL) - expect(report.deleted_acls[0].matching_acl_pattern_type).to eq(Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_LITERAL) - end - - it "gets deleted acl principal as User:anonymous" do - expect(report.deleted_acls[0].matching_acl_principal).to eq("User:anonymous") - end - - it "gets deleted acl host as *" do - expect(report.deleted_acls[0].matching_acl_host).to eq("*") - end - - it "gets deleted acl operation as Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_READ" do - expect(report.deleted_acls[0].matching_acl_operation).to eq(Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_READ) - end - - it "gets deleted acl permission_type as Rdkafka::Bindings::RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW" do - expect(report.deleted_acls[0].matching_acl_permission_type).to eq(Rdkafka::Bindings::RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW) - end -end diff --git a/spec/lib/rdkafka/admin/delete_topic_handle_spec.rb b/spec/lib/rdkafka/admin/delete_topic_handle_spec.rb deleted file mode 100644 index d3311f49..00000000 --- a/spec/lib/rdkafka/admin/delete_topic_handle_spec.rb +++ /dev/null @@ -1,55 +0,0 @@ -# frozen_string_literal: true - -RSpec.describe Rdkafka::Admin::DeleteTopicHandle do - let(:handle) do - described_class.new.tap do |handle| - handle[:pending] = pending_handle - handle[:response] = response - handle[:error_string] = FFI::Pointer::NULL - handle[:result_name] = FFI::MemoryPointer.from_string(topic_name) - end - end - - let(:response) { 0 } - let(:topic_name) { TestTopics.unique } - - describe "#wait" do - let(:pending_handle) { true } - - it "waits until the timeout and then raises an error" do - expect { -
handle.wait(max_wait_timeout_ms: 100) - }.to raise_error Rdkafka::Admin::DeleteTopicHandle::WaitTimeoutError, /delete topic/ - end - - context "when not pending anymore and no error" do - let(:pending_handle) { false } - - it "returns a delete topic report" do - report = handle.wait - - expect(report.error_string).to be_nil - expect(report.result_name).to eq(topic_name) - end - - it "waits without a timeout" do - report = handle.wait(max_wait_timeout_ms: nil) - - expect(report.error_string).to be_nil - expect(report.result_name).to eq(topic_name) - end - end - end - - describe "#raise_error" do - let(:pending_handle) { false } - - before { handle[:response] = -1 } - - it "raises the appropriate error" do - expect { - handle.raise_error - }.to raise_exception(Rdkafka::RdkafkaError, /Unknown broker error \(unknown\)/) - end - end -end diff --git a/spec/lib/rdkafka/admin/delete_topic_report_spec.rb b/spec/lib/rdkafka/admin/delete_topic_report_spec.rb deleted file mode 100644 index 8cad6203..00000000 --- a/spec/lib/rdkafka/admin/delete_topic_report_spec.rb +++ /dev/null @@ -1,18 +0,0 @@ -# frozen_string_literal: true - -RSpec.describe Rdkafka::Admin::DeleteTopicReport do - let(:report) { - described_class.new( - FFI::MemoryPointer.from_string("error string"), - FFI::MemoryPointer.from_string("result name") - ) - } - - it "gets the error string" do - expect(report.error_string).to eq("error string") - end - - it "gets the result name" do - expect(report.result_name).to eq("result name") - end -end diff --git a/spec/lib/rdkafka/admin/describe_acl_handle_spec.rb b/spec/lib/rdkafka/admin/describe_acl_handle_spec.rb deleted file mode 100644 index 19e4106c..00000000 --- a/spec/lib/rdkafka/admin/describe_acl_handle_spec.rb +++ /dev/null @@ -1,83 +0,0 @@ -# frozen_string_literal: true - -RSpec.describe Rdkafka::Admin::DescribeAclHandle do - let(:handle) do - error_buffer = FFI::MemoryPointer.from_string(" " * 256) - describe_acl_ptr = Rdkafka::Bindings.rd_kafka_AclBinding_new( - resource_type, - FFI::MemoryPointer.from_string(resource_name), - resource_pattern_type, - FFI::MemoryPointer.from_string(principal), - FFI::MemoryPointer.from_string(host), - operation, - permission_type, - error_buffer, - 256 - ) - if describe_acl_ptr.null? 
- raise Rdkafka::Config::ConfigError.new(error_buffer.read_string) - end - pointer_array = [describe_acl_ptr] - describe_acls_array_ptr = FFI::MemoryPointer.new(:pointer) - describe_acls_array_ptr.write_array_of_pointer(pointer_array) - described_class.new.tap do |handle| - handle[:pending] = pending_handle - handle[:response] = response - handle[:response_string] = FFI::MemoryPointer.from_string("") - handle[:acls] = describe_acls_array_ptr - handle[:acls_count] = 1 - end - end - - let(:response) { Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR } - let(:resource_name) { TestTopics.unique } - let(:resource_type) { Rdkafka::Bindings::RD_KAFKA_RESOURCE_TOPIC } - let(:resource_pattern_type) { Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_LITERAL } - let(:principal) { "User:anonymous" } - let(:host) { "*" } - let(:operation) { Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_READ } - let(:permission_type) { Rdkafka::Bindings::RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW } - let(:describe_acl_ptr) { FFI::Pointer::NULL } - - after do - if describe_acl_ptr != FFI::Pointer::NULL - Rdkafka::Bindings.rd_kafka_AclBinding_destroy(describe_acl_ptr) - end - end - - describe "#wait" do - let(:pending_handle) { true } - - it "waits until the timeout and then raises an error" do - expect { - handle.wait(max_wait_timeout_ms: 100) - }.to raise_error Rdkafka::Admin::DescribeAclHandle::WaitTimeoutError, /describe acl/ - end - - context "when not pending anymore and no error" do - let(:pending_handle) { false } - - it "returns a describe acl report" do - report = handle.wait - - expect(report.acls.length).to eq(1) - end - - it "waits without a timeout" do - report = handle.wait(max_wait_timeout_ms: nil) - - expect(report.acls[0].matching_acl_resource_name).to eq(resource_name) - end - end - end - - describe "#raise_error" do - let(:pending_handle) { false } - - it "raises the appropriate error" do - expect { - handle.raise_error - }.to raise_exception(Rdkafka::RdkafkaError, /Success \(no_error\)/) - end - end -end diff --git a/spec/lib/rdkafka/admin/describe_acl_report_spec.rb b/spec/lib/rdkafka/admin/describe_acl_report_spec.rb deleted file mode 100644 index 8e72e7b6..00000000 --- a/spec/lib/rdkafka/admin/describe_acl_report_spec.rb +++ /dev/null @@ -1,69 +0,0 @@ -# frozen_string_literal: true - -RSpec.describe Rdkafka::Admin::DescribeAclReport do - let(:report) do - error_buffer = FFI::MemoryPointer.from_string(" " * 256) - describe_acl_ptr = Rdkafka::Bindings.rd_kafka_AclBinding_new( - resource_type, - FFI::MemoryPointer.from_string(resource_name), - resource_pattern_type, - FFI::MemoryPointer.from_string(principal), - FFI::MemoryPointer.from_string(host), - operation, - permission_type, - error_buffer, - 256 - ) - if describe_acl_ptr.null?
- raise Rdkafka::Config::ConfigError.new(error_buffer.read_string) - end - pointer_array = [describe_acl_ptr] - describe_acls_array_ptr = FFI::MemoryPointer.new(:pointer) - describe_acls_array_ptr.write_array_of_pointer(pointer_array) - described_class.new(acls: describe_acls_array_ptr, acls_count: 1) - end - - let(:resource_name) { TestTopics.unique } - let(:resource_type) { Rdkafka::Bindings::RD_KAFKA_RESOURCE_TOPIC } - let(:resource_pattern_type) { Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_LITERAL } - let(:principal) { "User:anonymous" } - let(:host) { "*" } - let(:operation) { Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_READ } - let(:permission_type) { Rdkafka::Bindings::RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW } - let(:describe_acl_ptr) { FFI::Pointer::NULL } - - after do - if describe_acl_ptr != FFI::Pointer::NULL - Rdkafka::Bindings.rd_kafka_AclBinding_destroy(describe_acl_ptr) - end - end - - it "gets matching acl resource type as Rdkafka::Bindings::RD_KAFKA_RESOURCE_TOPIC" do - expect(report.acls[0].matching_acl_resource_type).to eq(Rdkafka::Bindings::RD_KAFKA_RESOURCE_TOPIC) - end - - it "gets matching acl resource name" do - expect(report.acls[0].matching_acl_resource_name).to eq(resource_name) - end - - it "gets matching acl resource pattern type as Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_LITERAL" do - expect(report.acls[0].matching_acl_resource_pattern_type).to eq(Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_LITERAL) - expect(report.acls[0].matching_acl_pattern_type).to eq(Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_LITERAL) - end - - it "gets matching acl principal as User:anonymous" do - expect(report.acls[0].matching_acl_principal).to eq("User:anonymous") - end - - it "gets matching acl host as *" do - expect(report.acls[0].matching_acl_host).to eq("*") - end - - it "gets matching acl operation as Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_READ" do - expect(report.acls[0].matching_acl_operation).to eq(Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_READ) - end - - it "gets matching acl permission_type as Rdkafka::Bindings::RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW" do - expect(report.acls[0].matching_acl_permission_type).to eq(Rdkafka::Bindings::RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW) - end -end diff --git a/spec/lib/rdkafka/admin/list_offsets_handle_spec.rb b/spec/lib/rdkafka/admin/list_offsets_handle_spec.rb deleted file mode 100644 index beb85f94..00000000 --- a/spec/lib/rdkafka/admin/list_offsets_handle_spec.rb +++ /dev/null @@ -1,52 +0,0 @@ -# frozen_string_literal: true - -RSpec.describe Rdkafka::Admin::ListOffsetsHandle do - let(:handle) do - described_class.new.tap do |handle| - handle[:pending] = pending_handle - handle[:response] = response - handle[:response_string] = FFI::MemoryPointer.from_string("") - handle[:result_infos] = FFI::Pointer::NULL - handle[:result_count] = 0 - end - end - - let(:response) { Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR } - - describe "#wait" do - let(:pending_handle) { true } - - it "waits until the timeout and then raises an error" do - expect { - handle.wait(max_wait_timeout_ms: 100) - }.to raise_error Rdkafka::Admin::ListOffsetsHandle::WaitTimeoutError, /list offsets/ - end - - context "when not pending anymore and no error" do - let(:pending_handle) { false } - - it "returns a list offsets report" do - report = handle.wait - - expect(report).to be_a(Rdkafka::Admin::ListOffsetsReport) - expect(report.offsets).to eq([]) - end - - it "waits without a timeout" do - report = handle.wait(max_wait_timeout_ms: nil) - - expect(report.offsets).to eq([]) 
- end - end - end - - describe "#raise_error" do - let(:pending_handle) { false } - - it "raises the appropriate error" do - expect { - handle.raise_error - }.to raise_exception(Rdkafka::RdkafkaError, /Success \(no_error\)/) - end - end -end diff --git a/spec/lib/rdkafka/admin/list_offsets_report_spec.rb b/spec/lib/rdkafka/admin/list_offsets_report_spec.rb deleted file mode 100644 index 0c771ca6..00000000 --- a/spec/lib/rdkafka/admin/list_offsets_report_spec.rb +++ /dev/null @@ -1,13 +0,0 @@ -# frozen_string_literal: true - -RSpec.describe Rdkafka::Admin::ListOffsetsReport do - describe "#initialize" do - context "when result_infos is NULL" do - let(:report) { described_class.new(result_infos: FFI::Pointer::NULL, result_count: 0) } - - it "returns empty offsets" do - expect(report.offsets).to eq([]) - end - end - end -end diff --git a/spec/lib/rdkafka/admin_spec.rb b/spec/lib/rdkafka/admin_spec.rb deleted file mode 100644 index 1fa4407e..00000000 --- a/spec/lib/rdkafka/admin_spec.rb +++ /dev/null @@ -1,1198 +0,0 @@ -# frozen_string_literal: true - -require "ostruct" - -RSpec.describe Rdkafka::Admin do - let(:config) { rdkafka_config } - let(:topic_name) { "test-topic-#{SecureRandom.uuid}" } - let(:topic_partition_count) { 3 } - let(:topic_replication_factor) { 1 } - let(:topic_config) { { "cleanup.policy" => "compact", "min.cleanable.dirty.ratio" => 0.8 } } - let(:invalid_topic_config) { { "cleeeeenup.policee" => "campact" } } - let(:group_name) { "test-group-#{SecureRandom.uuid}" } - let(:resource_name) { TestTopics.unique } - let(:resource_type) { Rdkafka::Bindings::RD_KAFKA_RESOURCE_TOPIC } - let(:resource_pattern_type) { Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_LITERAL } - let(:principal) { "User:anonymous" } - let(:host) { "*" } - let(:operation) { Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_READ } - let(:permission_type) { Rdkafka::Bindings::RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW } - let(:admin) { config.admin } - - after do - # Registry should always end up being empty - expect(Rdkafka::Admin::CreateTopicHandle::REGISTRY).to be_empty - expect(Rdkafka::Admin::CreatePartitionsHandle::REGISTRY).to be_empty - expect(Rdkafka::Admin::DescribeAclHandle::REGISTRY).to be_empty - expect(Rdkafka::Admin::CreateAclHandle::REGISTRY).to be_empty - expect(Rdkafka::Admin::DeleteAclHandle::REGISTRY).to be_empty - expect(Rdkafka::Admin::ListOffsetsHandle::REGISTRY).to be_empty - admin.close - end - - describe "#describe_errors" do - let(:errors) { admin.class.describe_errors } - - it { expect(errors.size).to eq(172) } - it { expect(errors[-184]).to eq(code: -184, description: "Local: Queue full", name: "_QUEUE_FULL") } - it { expect(errors[21]).to eq(code: 21, description: "Broker: Invalid required acks value", name: "INVALID_REQUIRED_ACKS") } - end - - describe "admin without auto-start" do - let(:admin) { config.admin(native_kafka_auto_start: false) } - - it "expect to be able to start it later and close" do - admin.start - admin.close - end - - it "expect to be able to close it without starting" do - admin.close - end - end - - describe "#create_topic" do - describe "called with invalid input" do - describe "with an invalid topic name" do - # https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/internals/Topic.java#L29 - # public static final String LEGAL_CHARS = "[a-zA-Z0-9._-]"; - let(:topic_name) { "[!@#]" } - - it "raises an exception" do - create_topic_handle = admin.create_topic(topic_name, topic_partition_count, topic_replication_factor) - expect { - 
create_topic_handle.wait(max_wait_timeout_ms: 15_000) - }.to raise_exception { |ex| - expect(ex).to be_a(Rdkafka::RdkafkaError) - expect(ex.message).to match(/Broker: Invalid topic \(topic_exception\)/) - expect(ex.broker_message).to match(/Topic name.*is invalid: .* contains one or more characters other than ASCII alphanumerics, '.', '_' and '-'/) - } - end - end - - describe "with the name of a topic that already exists" do - let(:topic_name) { TestTopics.create } - - it "raises an exception" do - create_topic_handle = admin.create_topic(topic_name, topic_partition_count, topic_replication_factor) - expect { - create_topic_handle.wait(max_wait_timeout_ms: 15_000) - }.to raise_exception { |ex| - expect(ex).to be_a(Rdkafka::RdkafkaError) - expect(ex.message).to match(/Broker: Topic already exists \(topic_already_exists\)/) - expect(ex.broker_message).to match(/Topic '#{Regexp.escape(topic_name)}' already exists/) - } - end - end - - describe "with an invalid partition count" do - let(:topic_partition_count) { -999 } - - it "raises an exception" do - expect { - admin.create_topic(topic_name, topic_partition_count, topic_replication_factor) - }.to raise_error Rdkafka::Config::ConfigError, /num_partitions out of expected range/ - end - end - - describe "with an invalid replication factor" do - let(:topic_replication_factor) { -2 } - - it "raises an exception" do - expect { - admin.create_topic(topic_name, topic_partition_count, topic_replication_factor) - }.to raise_error Rdkafka::Config::ConfigError, /replication_factor out of expected range/ - end - end - - describe "with an invalid topic configuration" do - it "doesn't create the topic" do - create_topic_handle = admin.create_topic(topic_name, topic_partition_count, topic_replication_factor, invalid_topic_config) - expect { - create_topic_handle.wait(max_wait_timeout_ms: 15_000) - }.to raise_error Rdkafka::RdkafkaError, /Broker: Configuration is invalid \(invalid_config\)/ - end - end - end - - context "edge case" do - context "where we are unable to get the background queue" do - before do - allow(Rdkafka::Bindings).to receive(:rd_kafka_queue_get_background).and_return(FFI::Pointer::NULL) - end - - it "raises an exception" do - expect { - admin.create_topic(topic_name, topic_partition_count, topic_replication_factor) - }.to raise_error Rdkafka::Config::ConfigError, /rd_kafka_queue_get_background was NULL/ - end - end - - context "where rd_kafka_CreateTopics raises an exception" do - before do - allow(Rdkafka::Bindings).to receive(:rd_kafka_CreateTopics).and_raise(RuntimeError.new("oops")) - end - - it "raises an exception" do - expect { - admin.create_topic(topic_name, topic_partition_count, topic_replication_factor) - }.to raise_error RuntimeError, /oops/ - end - end - end - - it "creates a topic" do - create_topic_handle = admin.create_topic(topic_name, topic_partition_count, topic_replication_factor, topic_config) - create_topic_report = create_topic_handle.wait(max_wait_timeout_ms: 15_000) - expect(create_topic_report.error_string).to be_nil - expect(create_topic_report.result_name).to eq(topic_name) - end - end - - describe "describe_configs" do - let(:resources_results) { admin.describe_configs(resources).wait.resources } - - before do - admin.create_topic(topic_name, 2, 1).wait - sleep(1) - end - - context "when describing config of an existing topic" do - let(:resources) { [{ resource_type: 2, resource_name: topic_name }] } - - it do - expect(resources_results.size).to eq(1) - expect(resources_results.first.type).to eq(2) - 
expect(resources_results.first.name).to eq(topic_name) - expect(resources_results.first.configs.size).to be > 25 - expect(resources_results.first.configs.first.name).to eq("compression.type") - expect(resources_results.first.configs.first.value).to eq("producer") - expect(resources_results.first.configs.map(&:synonyms)).not_to be_empty - end - end - - context "when describing config of a non-existing topic" do - let(:resources) { [{ resource_type: 2, resource_name: SecureRandom.uuid }] } - - it "expect to raise error" do - expect { resources_results }.to raise_error(Rdkafka::RdkafkaError, /unknown_topic_or_part/) - end - end - - context "when describing both existing and non-existing topics" do - let(:resources) do - [ - { resource_type: 2, resource_name: topic_name }, - { resource_type: 2, resource_name: SecureRandom.uuid } - ] - end - - it "expect to raise error" do - expect { resources_results }.to raise_error(Rdkafka::RdkafkaError, /unknown_topic_or_part/) - end - end - - context "when describing multiple existing topics" do - let(:resources) do - [ - { resource_type: 2, resource_name: TestTopics.example_topic }, - { resource_type: 2, resource_name: topic_name } - ] - end - - it do - expect(resources_results.size).to eq(2) - expect(resources_results.first.type).to eq(2) - expect(resources_results.first.name).to eq(TestTopics.example_topic) - expect(resources_results.last.type).to eq(2) - expect(resources_results.last.name).to eq(topic_name) - end - end - - context "when trying to describe invalid resource type" do - let(:resources) { [{ resource_type: 0, resource_name: SecureRandom.uuid }] } - - it "expect to raise error" do - expect { resources_results }.to raise_error(Rdkafka::RdkafkaError, /invalid_request/) - end - end - - context "when trying to describe invalid broker" do - let(:resources) { [{ resource_type: 4, resource_name: "non-existing" }] } - - it "expect to raise error" do - expect { resources_results }.to raise_error(Rdkafka::RdkafkaError, /invalid_arg/) - end - end - - context "when trying to describe valid broker" do - let(:resources) { [{ resource_type: 4, resource_name: "1" }] } - - it do - expect(resources_results.size).to eq(1) - expect(resources_results.first.type).to eq(4) - expect(resources_results.first.name).to eq("1") - expect(resources_results.first.configs.size).to be > 230 - expect(resources_results.first.configs.first.name).to eq("log.cleaner.min.compaction.lag.ms") - expect(resources_results.first.configs.first.value).to eq("0") - expect(resources_results.first.configs.map(&:synonyms)).not_to be_empty - end - end - - context "when describing valid broker with topics in one request" do - let(:resources) do - [ - { resource_type: 4, resource_name: "1" }, - { resource_type: 2, resource_name: topic_name } - ] - end - - it do - expect(resources_results.size).to eq(2) - expect(resources_results.first.type).to eq(4) - expect(resources_results.first.name).to eq("1") - expect(resources_results.first.configs.size).to be > 230 - expect(resources_results.first.configs.first.name).to eq("log.cleaner.min.compaction.lag.ms") - expect(resources_results.first.configs.first.value).to eq("0") - expect(resources_results.last.type).to eq(2) - expect(resources_results.last.name).to eq(topic_name) - expect(resources_results.last.configs.size).to be > 25 - expect(resources_results.last.configs.first.name).to eq("compression.type") - expect(resources_results.last.configs.first.value).to eq("producer") - end - end - end - - describe "incremental_alter_configs" do - 
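For orientation in the contexts that follow: the numeric op_type values map to librdkafka's rd_kafka_AlterConfigOpType_t enum, i.e. 0 = SET, 1 = DELETE (revert to the broker default), 2 = APPEND and 3 = SUBTRACT, which is why the delete case below asserts the default 86400000 ms afterwards.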
let(:resources_results) { admin.incremental_alter_configs(resources_with_configs).wait.resources } - - before do - admin.create_topic(topic_name, 2, 1).wait - sleep(1) - end - - context "when altering one topic with one valid config via set" do - let(:target_retention) { rand(86400002..86410001).to_s } - let(:resources_with_configs) do - [ - { - resource_type: 2, - resource_name: topic_name, - configs: [ - { - name: "delete.retention.ms", - value: target_retention, - op_type: 0 - } - ] - } - ] - end - - it do - expect(resources_results.size).to eq(1) - expect(resources_results.first.type).to eq(2) - expect(resources_results.first.name).to eq(topic_name) - - sleep(1) - - ret_config = admin.describe_configs(resources_with_configs).wait.resources.first.configs.find do |config| - config.name == "delete.retention.ms" - end - - expect(ret_config.value).to eq(target_retention) - end - end - - context "when altering one topic with one valid config via delete" do - let(:target_retention) { rand(8640002..8650001).to_s } - let(:resources_with_configs) do - [ - { - resource_type: 2, - resource_name: topic_name, - configs: [ - { - name: "delete.retention.ms", - value: target_retention, - op_type: 1 - } - ] - } - ] - end - - it do - expect(resources_results.size).to eq(1) - expect(resources_results.first.type).to eq(2) - expect(resources_results.first.name).to eq(topic_name) - - sleep(1) - - ret_config = admin.describe_configs(resources_with_configs).wait.resources.first.configs.find do |config| - config.name == "delete.retention.ms" - end - - expect(ret_config.value).to eq("86400000") - end - end - - context "when altering one topic with one valid config via append" do - let(:target_policy) { "compact" } - let(:resources_with_configs) do - [ - { - resource_type: 2, - resource_name: topic_name, - configs: [ - { - name: "cleanup.policy", - value: target_policy, - op_type: 2 - } - ] - } - ] - end - - it do - expect(resources_results.size).to eq(1) - expect(resources_results.first.type).to eq(2) - expect(resources_results.first.name).to eq(topic_name) - - sleep(1) - - ret_config = admin.describe_configs(resources_with_configs).wait.resources.first.configs.find do |config| - config.name == "cleanup.policy" - end - - expect(ret_config.value).to eq("delete,#{target_policy}") - end - end - - context "when altering one topic with one valid config via subtract" do - let(:target_policy) { "delete" } - let(:resources_with_configs) do - [ - { - resource_type: 2, - resource_name: topic_name, - configs: [ - { - name: "cleanup.policy", - value: target_policy, - op_type: 3 - } - ] - } - ] - end - - it do - expect(resources_results.size).to eq(1) - expect(resources_results.first.type).to eq(2) - expect(resources_results.first.name).to eq(topic_name) - - sleep(1) - - ret_config = admin.describe_configs(resources_with_configs).wait.resources.first.configs.find do |config| - config.name == "cleanup.policy" - end - - expect(ret_config.value).to eq("") - end - end - - context "when altering one topic with invalid config" do - let(:target_retention) { "-10" } - let(:resources_with_configs) do - [ - { - resource_type: 2, - resource_name: topic_name, - configs: [ - { - name: "delete.retention.ms", - value: target_retention, - op_type: 0 - } - ] - } - ] - end - - it "expect to raise error" do - expect { resources_results }.to raise_error(Rdkafka::RdkafkaError, /invalid_config/) - end - end - end - - describe "#list_offsets" do - context "when querying offsets for an existing topic with messages" do - let(:topic) {
TestTopics.create } - - before do - # Produce a message to ensure partition leaders are fully established - producer = rdkafka_config.producer - producer.produce(topic: topic, payload: "warmup", partition: 0).wait - producer.close - end - - it "returns earliest offsets" do - report = admin.list_offsets( - { topic => [{ partition: 0, offset: :earliest }] } - ).wait(max_wait_timeout_ms: 15_000) - - expect(report).to be_a(Rdkafka::Admin::ListOffsetsReport) - expect(report.offsets.length).to be >= 1 - - first = report.offsets.first - expect(first[:topic]).to eq(topic) - expect(first[:partition]).to eq(0) - expect(first[:offset]).to be >= 0 - end - - it "returns latest offsets" do - report = admin.list_offsets( - { topic => [{ partition: 0, offset: :latest }] } - ).wait(max_wait_timeout_ms: 15_000) - - expect(report.offsets.length).to be >= 1 - - first = report.offsets.first - expect(first[:topic]).to eq(topic) - expect(first[:partition]).to eq(0) - expect(first[:offset]).to be >= 0 - end - - it "returns offsets for multiple partitions at once" do - report = admin.list_offsets( - { topic => [ - { partition: 0, offset: :earliest }, - { partition: 1, offset: :latest } - ] } - ).wait(max_wait_timeout_ms: 15_000) - - expect(report.offsets.length).to eq(2) - expect(report.offsets.map { |o| o[:partition] }.sort).to eq([0, 1]) - end - - it "returns offsets with read_committed isolation level" do - report = admin.list_offsets( - { topic => [{ partition: 0, offset: :latest }] }, - isolation_level: Rdkafka::Bindings::RD_KAFKA_ISOLATION_LEVEL_READ_COMMITTED - ).wait(max_wait_timeout_ms: 15_000) - - expect(report.offsets.length).to eq(1) - end - end - - context "when querying offsets by timestamp" do - let(:topic) { TestTopics.create } - - it "returns offsets for a given timestamp" do - # Use a timestamp of 0 (epoch) to get earliest messages. - # Retry on transient broker errors (not_leader_for_partition) that can - # occur when partition leadership hasn't fully settled after topic creation. 
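(Background for the retry below: CreateTopics can return before partition leadership has propagated to every broker, so an immediate ListOffsets may be answered by a broker that is not yet the leader and fail with not_leader_for_partition; a short bounded retry is the standard remedy.)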
- report = nil - 3.times do - report = admin.list_offsets( - { topic => [{ partition: 0, offset: 0 }] } - ).wait(max_wait_timeout_ms: 15_000) - break - rescue Rdkafka::RdkafkaError => e - raise unless e.message.include?("not_leader_for_partition") - - sleep(1) - end - - expect(report.offsets.length).to eq(1) - first = report.offsets.first - expect(first[:topic]).to eq(topic) - expect(first[:partition]).to eq(0) - end - end - - context "when admin is closed" do - it "raises ClosedAdminError" do - admin.close - expect { - admin.list_offsets({ "topic" => [{ partition: 0, offset: :earliest }] }) - }.to raise_error(Rdkafka::ClosedAdminError) - end - end - - context "edge case" do - context "where we are unable to get the background queue" do - before do - allow(Rdkafka::Bindings).to receive(:rd_kafka_queue_get_background).and_return(FFI::Pointer::NULL) - end - - it "raises an exception" do - expect { - admin.list_offsets({ "topic" => [{ partition: 0, offset: :earliest }] }) - }.to raise_error Rdkafka::Config::ConfigError, /rd_kafka_queue_get_background was NULL/ - end - end - - context "where rd_kafka_ListOffsets raises an exception" do - before do - allow(Rdkafka::Bindings).to receive(:rd_kafka_ListOffsets).and_raise(RuntimeError.new("oops")) - end - - it "raises an exception" do - expect { - admin.list_offsets({ "topic" => [{ partition: 0, offset: :earliest }] }) - }.to raise_error RuntimeError, /oops/ - end - end - end - - context "with invalid offset specification" do - it "raises ArgumentError for unknown symbol" do - expect { - admin.list_offsets({ "topic" => [{ partition: 0, offset: :unknown }] }) - }.to raise_error(ArgumentError, /Unknown offset specification/) - end - end - end - - describe "#delete_topic" do - describe "called with invalid input" do - # https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/internals/Topic.java#L29 - # public static final String LEGAL_CHARS = "[a-zA-Z0-9._-]"; - describe "with an invalid topic name" do - let(:topic_name) { "[!@#]" } - - it "raises an exception" do - delete_topic_handle = admin.delete_topic(topic_name) - expect { - delete_topic_handle.wait(max_wait_timeout_ms: 15_000) - }.to raise_exception { |ex| - expect(ex).to be_a(Rdkafka::RdkafkaError) - expect(ex.message).to match(/Broker: Unknown topic or partition \(unknown_topic_or_part\)/) - expect(ex.broker_message).to match(/Broker: Unknown topic or partition/) - } - end - end - - describe "with the name of a topic that does not exist" do - it "raises an exception" do - delete_topic_handle = admin.delete_topic(topic_name) - expect { - delete_topic_handle.wait(max_wait_timeout_ms: 15_000) - }.to raise_exception { |ex| - expect(ex).to be_a(Rdkafka::RdkafkaError) - expect(ex.message).to match(/Broker: Unknown topic or partition \(unknown_topic_or_part\)/) - expect(ex.broker_message).to match(/Broker: Unknown topic or partition/) - } - end - end - end - - context "edge case" do - context "where we are unable to get the background queue" do - before do - allow(Rdkafka::Bindings).to receive(:rd_kafka_queue_get_background).and_return(FFI::Pointer::NULL) - end - - it "raises an exception" do - expect { - admin.delete_topic(topic_name) - }.to raise_error Rdkafka::Config::ConfigError, /rd_kafka_queue_get_background was NULL/ - end - end - - context "where rd_kafka_DeleteTopics raises an exception" do - before do - allow(Rdkafka::Bindings).to receive(:rd_kafka_DeleteTopics).and_raise(RuntimeError.new("oops")) - end - - it "raises an exception" do - expect { - 
admin.delete_topic(topic_name) - }.to raise_error RuntimeError, /oops/ - end - end - end - - it "deletes a topic that was newly created" do - create_topic_handle = admin.create_topic(topic_name, topic_partition_count, topic_replication_factor) - create_topic_report = create_topic_handle.wait(max_wait_timeout_ms: 15_000) - expect(create_topic_report.error_string).to be_nil - expect(create_topic_report.result_name).to eq(topic_name) - - # Retry topic deletion a few times. On CI Kafka seems to not - # always be ready for it immediately - delete_topic_report = nil - 10.times do |i| - delete_topic_handle = admin.delete_topic(topic_name) - delete_topic_report = delete_topic_handle.wait(max_wait_timeout_ms: 15_000) - break - rescue Rdkafka::RdkafkaError => ex - if i > 3 - raise ex - end - end - - expect(delete_topic_report.error_string).to be_nil - expect(delete_topic_report.result_name).to eq(topic_name) - end - end - - describe "#ACL tests for topic resource" do - let(:non_existing_resource_name) { "non-existing-topic" } - - before do - # create topic for testing acl - create_topic_handle = admin.create_topic(resource_name, topic_partition_count, topic_replication_factor) - create_topic_handle.wait(max_wait_timeout_ms: 15_000) - end - - after do - # delete acl - delete_acl_handle = admin.delete_acl(resource_type: resource_type, resource_name: resource_name, resource_pattern_type: resource_pattern_type, principal: principal, host: host, operation: operation, permission_type: permission_type) - delete_acl_handle.wait(max_wait_timeout_ms: 15_000) - - # delete topic that was created for testing acl - delete_topic_handle = admin.delete_topic(resource_name) - delete_topic_handle.wait(max_wait_timeout_ms: 15_000) - end - - describe "#create_acl" do - it "create acl for a topic that does not exist" do - # acl creation for resources that do not exist will still succeed.
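(Kafka authorizers store ACL bindings independently of whether the named resource exists, so the call below succeeds; this also allows pre-provisioning ACLs for topics that are only created later.)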
- create_acl_handle = admin.create_acl(resource_type: resource_type, resource_name: non_existing_resource_name, resource_pattern_type: resource_pattern_type, principal: principal, host: host, operation: operation, permission_type: permission_type) - create_acl_report = create_acl_handle.wait(max_wait_timeout_ms: 15_000) - expect(create_acl_report.rdkafka_response).to eq(0) - expect(create_acl_report.rdkafka_response_string).to eq("") - - # delete the acl that was created for a non-existing topic - delete_acl_handle = admin.delete_acl(resource_type: resource_type, resource_name: non_existing_resource_name, resource_pattern_type: resource_pattern_type, principal: principal, host: host, operation: operation, permission_type: permission_type) - delete_acl_report = delete_acl_handle.wait(max_wait_timeout_ms: 15_000) - expect(delete_acl_handle[:response]).to eq(0) - expect(delete_acl_report.deleted_acls.size).to eq(1) - end - - it "creates an acl for a topic that was newly created" do - create_acl_handle = admin.create_acl(resource_type: resource_type, resource_name: resource_name, resource_pattern_type: resource_pattern_type, principal: principal, host: host, operation: operation, permission_type: permission_type) - create_acl_report = create_acl_handle.wait(max_wait_timeout_ms: 15_000) - expect(create_acl_report.rdkafka_response).to eq(0) - expect(create_acl_report.rdkafka_response_string).to eq("") - end - end - - describe "#describe_acl" do - it "describe acl of a topic that does not exist" do - describe_acl_handle = admin.describe_acl(resource_type: resource_type, resource_name: non_existing_resource_name, resource_pattern_type: resource_pattern_type, principal: principal, host: host, operation: operation, permission_type: permission_type) - describe_acl_report = describe_acl_handle.wait(max_wait_timeout_ms: 15_000) - expect(describe_acl_handle[:response]).to eq(0) - expect(describe_acl_report.acls.size).to eq(0) - end - - it "create acls and describe the newly created acls" do - # create_acl - create_acl_handle = admin.create_acl(resource_type: resource_type, resource_name: "test_acl_topic_1", resource_pattern_type: resource_pattern_type, principal: principal, host: host, operation: operation, permission_type: permission_type) - create_acl_report = create_acl_handle.wait(max_wait_timeout_ms: 15_000) - expect(create_acl_report.rdkafka_response).to eq(0) - expect(create_acl_report.rdkafka_response_string).to eq("") - - create_acl_handle = admin.create_acl(resource_type: resource_type, resource_name: "test_acl_topic_2", resource_pattern_type: resource_pattern_type, principal: principal, host: host, operation: operation, permission_type: permission_type) - create_acl_report = create_acl_handle.wait(max_wait_timeout_ms: 15_000) - expect(create_acl_report.rdkafka_response).to eq(0) - expect(create_acl_report.rdkafka_response_string).to eq("") - - # Since we create and immediately check, this is slow on loaded CIs, hence we wait - sleep(2) - - # describe_acl - describe_acl_handle = admin.describe_acl(resource_type: Rdkafka::Bindings::RD_KAFKA_RESOURCE_ANY, resource_name: nil, resource_pattern_type: Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_ANY, principal: nil, host: nil, operation: Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_ANY, permission_type: Rdkafka::Bindings::RD_KAFKA_ACL_PERMISSION_TYPE_ANY) - describe_acl_report = describe_acl_handle.wait(max_wait_timeout_ms: 15_000) - expect(describe_acl_handle[:response]).to eq(0) - expect(describe_acl_report.acls.length).to eq(2) - end - end - -
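The fixed sleep(2) above always costs the full two seconds even when ACLs propagate faster. A polling alternative could look like the following sketch; wait_for_acls is a hypothetical helper, not part of this suite, built on the same describe_acl filter arguments used above:

    # Hypothetical helper: poll until `expected` ACLs are visible or give up.
    def wait_for_acls(admin, expected:, timeout: 10)
      deadline = Time.now + timeout
      loop do
        report = admin.describe_acl(
          resource_type: Rdkafka::Bindings::RD_KAFKA_RESOURCE_ANY,
          resource_name: nil,
          resource_pattern_type: Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_ANY,
          principal: nil,
          host: nil,
          operation: Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_ANY,
          permission_type: Rdkafka::Bindings::RD_KAFKA_ACL_PERMISSION_TYPE_ANY
        ).wait(max_wait_timeout_ms: 5_000)
        return report if report.acls.length >= expected
        raise "ACLs did not propagate within #{timeout}s" if Time.now > deadline
        sleep(0.2)
      end
    end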
describe "#delete_acl" do - it "delete acl of a topic that does not exist" do - delete_acl_handle = admin.delete_acl(resource_type: resource_type, resource_name: non_existing_resource_name, resource_pattern_type: resource_pattern_type, principal: principal, host: host, operation: operation, permission_type: permission_type) - delete_acl_report = delete_acl_handle.wait(max_wait_timeout_ms: 15_000) - expect(delete_acl_handle[:response]).to eq(0) - expect(delete_acl_report.deleted_acls.size).to eq(0) - end - - it "create an acl and delete the newly created acl" do - # create_acl - create_acl_handle = admin.create_acl(resource_type: resource_type, resource_name: "test_acl_topic_1", resource_pattern_type: resource_pattern_type, principal: principal, host: host, operation: operation, permission_type: permission_type) - create_acl_report = create_acl_handle.wait(max_wait_timeout_ms: 15_000) - expect(create_acl_report.rdkafka_response).to eq(0) - expect(create_acl_report.rdkafka_response_string).to eq("") - - create_acl_handle = admin.create_acl(resource_type: resource_type, resource_name: "test_acl_topic_2", resource_pattern_type: resource_pattern_type, principal: principal, host: host, operation: operation, permission_type: permission_type) - create_acl_report = create_acl_handle.wait(max_wait_timeout_ms: 15_000) - expect(create_acl_report.rdkafka_response).to eq(0) - expect(create_acl_report.rdkafka_response_string).to eq("") - - # delete_acl - resource_name nil - to delete all acls with any resource name and matching all other filters. - delete_acl_handle = admin.delete_acl(resource_type: resource_type, resource_name: nil, resource_pattern_type: resource_pattern_type, principal: principal, host: host, operation: operation, permission_type: permission_type) - delete_acl_report = delete_acl_handle.wait(max_wait_timeout_ms: 15_000) - expect(delete_acl_handle[:response]).to eq(0) - expect(delete_acl_report.deleted_acls.length).to eq(2) - end - end - end - - describe "#ACL tests for transactional_id" do - let(:transactional_id_resource_name) { "test-transactional-id" } - let(:non_existing_transactional_id) { "non-existing-transactional-id" } - let(:transactional_id_resource_type) { Rdkafka::Bindings::RD_KAFKA_RESOURCE_TRANSACTIONAL_ID } - let(:transactional_id_resource_pattern_type) { Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_LITERAL } - let(:transactional_id_principal) { "User:test-user" } - let(:transactional_id_host) { "*" } - let(:transactional_id_operation) { Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_WRITE } - let(:transactional_id_permission_type) { Rdkafka::Bindings::RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW } - - after do - # Clean up any ACLs that might have been created during tests - - delete_acl_handle = admin.delete_acl( - resource_type: transactional_id_resource_type, - resource_name: nil, - resource_pattern_type: transactional_id_resource_pattern_type, - principal: transactional_id_principal, - host: transactional_id_host, - operation: transactional_id_operation, - permission_type: transactional_id_permission_type - ) - delete_acl_handle.wait(max_wait_timeout_ms: 15_000) - rescue - # Ignore cleanup errors - end - - describe "#create_acl" do - it "creates acl for a transactional_id" do - create_acl_handle = admin.create_acl( - resource_type: transactional_id_resource_type, - resource_name: transactional_id_resource_name, - resource_pattern_type: transactional_id_resource_pattern_type, - principal: transactional_id_principal, - host: transactional_id_host, - operation: 
transactional_id_operation, - permission_type: transactional_id_permission_type - ) - create_acl_report = create_acl_handle.wait(max_wait_timeout_ms: 15_000) - expect(create_acl_report.rdkafka_response).to eq(0) - expect(create_acl_report.rdkafka_response_string).to eq("") - end - - it "creates an acl for a non-existing transactional_id" do - # ACLs for transactional_ids that don't exist are still created successfully - create_acl_handle = admin.create_acl( - resource_type: transactional_id_resource_type, - resource_name: non_existing_transactional_id, - resource_pattern_type: transactional_id_resource_pattern_type, - principal: transactional_id_principal, - host: transactional_id_host, - operation: transactional_id_operation, - permission_type: transactional_id_permission_type - ) - create_acl_report = create_acl_handle.wait(max_wait_timeout_ms: 15_000) - expect(create_acl_report.rdkafka_response).to eq(0) - expect(create_acl_report.rdkafka_response_string).to eq("") - - # Clean up the ACL that was created for the non-existing transactional_id - delete_acl_handle = admin.delete_acl( - resource_type: transactional_id_resource_type, - resource_name: non_existing_transactional_id, - resource_pattern_type: transactional_id_resource_pattern_type, - principal: transactional_id_principal, - host: transactional_id_host, - operation: transactional_id_operation, - permission_type: transactional_id_permission_type - ) - delete_acl_report = delete_acl_handle.wait(max_wait_timeout_ms: 15_000) - expect(delete_acl_handle[:response]).to eq(0) - expect(delete_acl_report.deleted_acls.size).to eq(1) - end - end - - describe "#describe_acl" do - it "describes the acl of a transactional_id that does not exist" do - describe_acl_handle = admin.describe_acl( - resource_type: transactional_id_resource_type, - resource_name: non_existing_transactional_id, - resource_pattern_type: transactional_id_resource_pattern_type, - principal: transactional_id_principal, - host: transactional_id_host, - operation: transactional_id_operation, - permission_type: transactional_id_permission_type - ) - describe_acl_report = describe_acl_handle.wait(max_wait_timeout_ms: 15_000) - expect(describe_acl_handle[:response]).to eq(0) - expect(describe_acl_report.acls.size).to eq(0) - end - - it "creates acls and describes the newly created transactional_id acls" do - # Create first ACL - create_acl_handle = admin.create_acl( - resource_type: transactional_id_resource_type, - resource_name: "test_transactional_id_1", - resource_pattern_type: transactional_id_resource_pattern_type, - principal: transactional_id_principal, - host: transactional_id_host, - operation: transactional_id_operation, - permission_type: transactional_id_permission_type - ) - create_acl_report = create_acl_handle.wait(max_wait_timeout_ms: 15_000) - expect(create_acl_report.rdkafka_response).to eq(0) - expect(create_acl_report.rdkafka_response_string).to eq("") - - # Create second ACL - create_acl_handle = admin.create_acl( - resource_type: transactional_id_resource_type, - resource_name: "test_transactional_id_2", - resource_pattern_type: transactional_id_resource_pattern_type, - principal: transactional_id_principal, - host: transactional_id_host, - operation: transactional_id_operation, - permission_type: transactional_id_permission_type - ) - create_acl_report = create_acl_handle.wait(max_wait_timeout_ms: 15_000) - expect(create_acl_report.rdkafka_response).to eq(0) - expect(create_acl_report.rdkafka_response_string).to eq("") - - # Since we create and 
immediately check, this is slow on loaded CIs, hence we wait - sleep(2) - - # Describe ACLs - filter by transactional_id resource type - describe_acl_handle = admin.describe_acl( - resource_type: transactional_id_resource_type, - resource_name: nil, - resource_pattern_type: Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_ANY, - principal: transactional_id_principal, - host: transactional_id_host, - operation: transactional_id_operation, - permission_type: transactional_id_permission_type - ) - describe_acl_report = describe_acl_handle.wait(max_wait_timeout_ms: 15_000) - expect(describe_acl_handle[:response]).to eq(0) - expect(describe_acl_report.acls.length).to eq(2) - end - end - - describe "#delete_acl" do - it "deletes the acl of a transactional_id that does not exist" do - delete_acl_handle = admin.delete_acl( - resource_type: transactional_id_resource_type, - resource_name: non_existing_transactional_id, - resource_pattern_type: transactional_id_resource_pattern_type, - principal: transactional_id_principal, - host: transactional_id_host, - operation: transactional_id_operation, - permission_type: transactional_id_permission_type - ) - delete_acl_report = delete_acl_handle.wait(max_wait_timeout_ms: 15_000) - expect(delete_acl_handle[:response]).to eq(0) - expect(delete_acl_report.deleted_acls.size).to eq(0) - end - - it "creates transactional_id acls and deletes the newly created acls" do - # Create first ACL - create_acl_handle = admin.create_acl( - resource_type: transactional_id_resource_type, - resource_name: "test_transactional_id_1", - resource_pattern_type: transactional_id_resource_pattern_type, - principal: transactional_id_principal, - host: transactional_id_host, - operation: transactional_id_operation, - permission_type: transactional_id_permission_type - ) - create_acl_report = create_acl_handle.wait(max_wait_timeout_ms: 15_000) - expect(create_acl_report.rdkafka_response).to eq(0) - expect(create_acl_report.rdkafka_response_string).to eq("") - - # Create second ACL - create_acl_handle = admin.create_acl( - resource_type: transactional_id_resource_type, - resource_name: "test_transactional_id_2", - resource_pattern_type: transactional_id_resource_pattern_type, - principal: transactional_id_principal, - host: transactional_id_host, - operation: transactional_id_operation, - permission_type: transactional_id_permission_type - ) - create_acl_report = create_acl_handle.wait(max_wait_timeout_ms: 15_000) - expect(create_acl_report.rdkafka_response).to eq(0) - expect(create_acl_report.rdkafka_response_string).to eq("") - - # Delete ACLs - resource_name nil to delete all ACLs with any resource name and matching all other filters - delete_acl_handle = admin.delete_acl( - resource_type: transactional_id_resource_type, - resource_name: nil, - resource_pattern_type: transactional_id_resource_pattern_type, - principal: transactional_id_principal, - host: transactional_id_host, - operation: transactional_id_operation, - permission_type: transactional_id_permission_type - ) - delete_acl_report = delete_acl_handle.wait(max_wait_timeout_ms: 15_000) - expect(delete_acl_handle[:response]).to eq(0) - expect(delete_acl_report.deleted_acls.length).to eq(2) - end - end - end - - describe("Group tests") do - describe "#delete_group" do - describe("with an existing group") do - let(:consumer_config) { rdkafka_consumer_config("group.id": group_name) } - let(:producer_config) { rdkafka_producer_config } - let(:producer) { producer_config.producer } - let(:consumer) { consumer_config.consumer } - - before 
do - # Create a topic, post a message to it, consume it and commit offsets; this creates a group that we can then delete. - admin.create_topic(topic_name, topic_partition_count, topic_replication_factor).wait(max_wait_timeout_ms: 15_000) - - producer.produce(topic: topic_name, payload: "test", key: "test").wait(max_wait_timeout_ms: 15_000) - - consumer.subscribe(topic_name) - wait_for_assignment(consumer) - message = nil - - 10.times do - message ||= consumer.poll(100) - end - - expect(message).not_to be_nil - - consumer.commit - consumer.close - end - - after do - producer.close - consumer.close - end - - it "deletes the group" do - delete_group_handle = admin.delete_group(group_name) - report = delete_group_handle.wait(max_wait_timeout_ms: 15_000) - - expect(report.result_name).to eql(group_name) - end - end - - describe "called with invalid input" do - describe "with the name of a group that does not exist" do - it "raises an exception" do - delete_group_handle = admin.delete_group(group_name) - - expect { - delete_group_handle.wait(max_wait_timeout_ms: 15_000) - }.to raise_exception { |ex| - expect(ex).to be_a(Rdkafka::RdkafkaError) - expect(ex.message).to match(/group_id_not_found|not_coordinator/) - } - end - end - end - end - end - - describe "#create_partitions" do - let(:metadata) do - admin.metadata(topic_name).topics.first - rescue Rdkafka::RdkafkaError - # We have to wait because if we query too fast after the topic creation request, the topic may not - # yet be available, raising an error. - # This occurs mostly on slow CIs - sleep(1) - admin.metadata(topic_name).topics.first - end - - context "when topic does not exist" do - it "expect to fail due to unknown topic or partition" do - expect { admin.create_partitions(topic_name, 10).wait }.to raise_error(Rdkafka::RdkafkaError, /unknown_topic_or_part/) - end - end - - context "when topic already has the desired number of partitions" do - before { admin.create_topic(topic_name, 2, 1).wait } - - it "expect not to change the number of partitions" do - expect { admin.create_partitions(topic_name, 2).wait }.to raise_error(Rdkafka::RdkafkaError, /invalid_partitions/) - expect(metadata[:partition_count]).to eq(2) - end - end - - context "when topic has more than the requested number of partitions" do - before { admin.create_topic(topic_name, 5, 1).wait } - - it "expect not to change the number of partitions" do - expect { admin.create_partitions(topic_name, 2).wait }.to raise_error(Rdkafka::RdkafkaError, /invalid_partitions/) - # On slow CI this may take a while to propagate, thus we wait a bit - sleep(1) - expect(metadata[:partition_count]).to eq(5) - end - end - - context "when topic has less than the desired number of partitions" do - before do - admin.create_topic(topic_name, 1, 1).wait - sleep(1) - end - - it "expect to change the number of partitions" do - admin.create_partitions(topic_name, 10).wait - sleep(1) - expect(metadata[:partition_count]).to eq(10) - end - end - end - - describe "#oauthbearer_set_token" do - context "when sasl not configured" do - it "returns RD_KAFKA_RESP_ERR__STATE" do - response = admin.oauthbearer_set_token( - token: "foo", - lifetime_ms: Time.now.to_i * 1000 + 900 * 1000, - principal_name: "kafka-cluster" - ) - expect(response).to eq(Rdkafka::Bindings::RD_KAFKA_RESP_ERR__STATE) - end - end - - context "when sasl configured" do - before do - config_sasl = rdkafka_config( - "security.protocol": "sasl_ssl", - "sasl.mechanisms": "OAUTHBEARER" - ) - $admin_sasl = config_sasl.admin - end - - after do - $admin_sasl.close - end - - context "without 
extensions" do - it "succeeds" do - response = $admin_sasl.oauthbearer_set_token( - token: "foo", - lifetime_ms: Time.now.to_i * 1000 + 900 * 1000, - principal_name: "kafka-cluster" - ) - expect(response).to eq(0) - end - end - - context "with extensions" do - it "succeeds" do - response = $admin_sasl.oauthbearer_set_token( - token: "foo", - lifetime_ms: Time.now.to_i * 1000 + 900 * 1000, - principal_name: "kafka-cluster", - extensions: { - "foo" => "bar" - } - ) - expect(response).to eq(0) - end - end - end - end - - describe "#events_poll_nb_each" do - it "does not raise when queue is empty" do - expect { admin.events_poll_nb_each { |_| } }.not_to raise_error - end - - it "yields the count after each poll" do - counts = [] - # Stub to return events, then zero - call_count = 0 - allow(Rdkafka::Bindings).to receive(:rd_kafka_poll_nb) do - call_count += 1 - (call_count <= 2) ? 1 : 0 - end - - admin.events_poll_nb_each { |count| counts << count } - - expect(counts).to eq([1, 1]) - end - - it "stops when block returns :stop" do - iterations = 0 - # Stub to always return events - allow(Rdkafka::Bindings).to receive(:rd_kafka_poll_nb).and_return(1) - - admin.events_poll_nb_each do |_count| - iterations += 1 - :stop if iterations >= 3 - end - - expect(iterations).to eq(3) - end - - context "when admin is closed" do - before { admin.close } - - it "raises ClosedAdminError" do - expect { admin.events_poll_nb_each { |_| } }.to raise_error(Rdkafka::ClosedAdminError, /events_poll_nb_each/) - end - end - end - - describe "file descriptor access for fiber scheduler integration" do - let(:admin) { config.admin(run_polling_thread: false) } - - it "enables IO events on admin queue" do - signal_r, signal_w = IO.pipe - expect { admin.enable_queue_io_events(signal_w.fileno) }.not_to raise_error - signal_r.close - signal_w.close - end - - it "enables IO events on background queue" do - signal_r, signal_w = IO.pipe - expect { admin.enable_background_queue_io_events(signal_w.fileno) }.not_to raise_error - signal_r.close - signal_w.close - end - - context "when admin is closed" do - before { admin.close } - - it "raises ClosedInnerError when enabling queue_io_events" do - signal_r, signal_w = IO.pipe - expect { admin.enable_queue_io_events(signal_w.fileno) }.to raise_error(Rdkafka::ClosedInnerError) - signal_r.close - signal_w.close - end - - it "raises ClosedInnerError when enabling background_queue_io_events" do - signal_r, signal_w = IO.pipe - expect { admin.enable_background_queue_io_events(signal_w.fileno) }.to raise_error(Rdkafka::ClosedInnerError) - signal_r.close - signal_w.close - end - end - end - - unless RUBY_PLATFORM == "java" - context "when operating from a fork" do - # @see https://github.com/ffi/ffi/issues/1114 - it "expect to be able to create topics and run other admin operations without hanging" do - # If the FFI issue is not mitigated, this will hang forever - pid = fork do - admin - .create_topic(topic_name, topic_partition_count, topic_replication_factor) - .wait - end - - Process.wait(pid) - end - end - end -end diff --git a/spec/lib/rdkafka/callbacks_spec.rb b/spec/lib/rdkafka/callbacks_spec.rb deleted file mode 100644 index 8e9b8f90..00000000 --- a/spec/lib/rdkafka/callbacks_spec.rb +++ /dev/null @@ -1 +0,0 @@ -# frozen_string_literal: true diff --git a/spec/lib/rdkafka/consumer/headers_spec.rb b/spec/lib/rdkafka/consumer/headers_spec.rb deleted file mode 100644 index 324873aa..00000000 --- a/spec/lib/rdkafka/consumer/headers_spec.rb +++ /dev/null @@ -1,73 +0,0 @@ -# 
frozen_string_literal: true - -RSpec.describe Rdkafka::Consumer::Headers do - let(:expected_headers) do - { # Note String keys! - "version" => ["2.1.3", "2.1.4"], - "type" => "String" - } - end - let(:native_message) { double("native message") } - let(:headers_ptr) { double("headers pointer") } - - describe ".from_native" do - let(:headers) { described_class.from_native(native_message) } - - before do - expect(Rdkafka::Bindings).to receive(:rd_kafka_message_headers).with(native_message, anything) do |_, headers_ptrptr| - expect(headers_ptrptr).to receive(:read_pointer).and_return(headers_ptr) - Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR - end - - # First version header - expect(Rdkafka::Bindings).to \ - receive(:rd_kafka_header_get_all) - .with(headers_ptr, 0, anything, anything, anything) do |_, _, name_ptrptr, value_ptrptr, size_ptr| - expect(name_ptrptr).to receive(:read_pointer).and_return(double("pointer 0", read_string_to_null: "version")) - expect(size_ptr).to receive(:[]).with(:value).and_return(expected_headers["version"][0].size) - expect(value_ptrptr).to receive(:read_pointer).and_return(double("value pointer 0", read_string: expected_headers["version"][0])) - Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR - end - - # Second version header - expect(Rdkafka::Bindings).to \ - receive(:rd_kafka_header_get_all) - .with(headers_ptr, 1, anything, anything, anything) do |_, _, name_ptrptr, value_ptrptr, size_ptr| - expect(name_ptrptr).to receive(:read_pointer).and_return(double("pointer 1", read_string_to_null: "version")) - expect(size_ptr).to receive(:[]).with(:value).and_return(expected_headers["version"][1].size) - expect(value_ptrptr).to receive(:read_pointer).and_return(double("value pointer 1", read_string: expected_headers["version"][1])) - Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR - end - - # Single type header - expect(Rdkafka::Bindings).to \ - receive(:rd_kafka_header_get_all) - .with(headers_ptr, 2, anything, anything, anything) do |_, _, name_ptrptr, value_ptrptr, size_ptr| - expect(name_ptrptr).to receive(:read_pointer).and_return(double("pointer 2", read_string_to_null: "type")) - expect(size_ptr).to receive(:[]).with(:value).and_return(expected_headers["type"].size) - expect(value_ptrptr).to receive(:read_pointer).and_return(double("value pointer 2", read_string: expected_headers["type"])) - Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR - end - - expect(Rdkafka::Bindings).to \ - receive(:rd_kafka_header_get_all) - .with(headers_ptr, 3, anything, anything, anything) - .and_return(Rdkafka::Bindings::RD_KAFKA_RESP_ERR__NOENT) - end - - it { expect(headers).to eq(expected_headers) } - it { expect(headers).to be_frozen } - - it "returns array for duplicate headers" do - expect(headers["version"]).to eq(["2.1.3", "2.1.4"]) - end - - it "returns string for single headers" do - expect(headers["type"]).to eq("String") - end - - it "does not support symbols mappings" do - expect(headers.key?(:version)).to be(false) - end - end -end diff --git a/spec/lib/rdkafka/consumer/message_spec.rb b/spec/lib/rdkafka/consumer/message_spec.rb deleted file mode 100644 index f2454885..00000000 --- a/spec/lib/rdkafka/consumer/message_spec.rb +++ /dev/null @@ -1,137 +0,0 @@ -# frozen_string_literal: true - -RSpec.describe Rdkafka::Consumer::Message do - let(:message) { described_class.new(native_message) } - - let(:native_client) { new_native_client } - let(:native_topic) { new_native_topic(native_client: native_client) } - let(:payload) { nil } - let(:key) { nil } - let(:native_message) do 
- Rdkafka::Bindings::Message.new.tap do |message| - message[:rkt] = native_topic - message[:partition] = 3 - message[:offset] = 100 - if payload - ptr = FFI::MemoryPointer.new(:char, payload.bytesize) - ptr.put_bytes(0, payload) - message[:payload] = ptr - message[:len] = payload.bytesize - end - if key - ptr = FFI::MemoryPointer.new(:char, key.bytesize) - ptr.put_bytes(0, key) - message[:key] = ptr - message[:key_len] = key.bytesize - end - end - end - - after do - Rdkafka::Bindings.rd_kafka_destroy(native_client) - end - - before do - # Mock headers, because setting or reading headers on a message created from scratch - # produces a 'segmentation fault' - # - # Code dump example: - # - # ``` - # frame #7: 0x000000010dacf5ab librdkafka.dylib`rd_list_destroy + 11 - # frame #8: 0x000000010dae5a7e librdkafka.dylib`rd_kafka_headers_destroy + 14 - # frame #9: 0x000000010da9ab40 librdkafka.dylib`rd_kafka_message_set_headers + 32 - # ``` - expect(Rdkafka::Bindings).to receive(:rd_kafka_message_headers).with(any_args).and_return(Rdkafka::Bindings::RD_KAFKA_RESP_ERR__NOENT) - end - - it "has a topic" do - expect(message.topic).to eq "topic_name" - end - - it "has a partition" do - expect(message.partition).to eq 3 - end - - context "payload" do - it "has a nil payload when none is present" do - expect(message.payload).to be_nil - end - - context "present payload" do - let(:payload) { "payload content" } - - it "has a payload" do - expect(message.payload).to eq "payload content" - end - end - end - - context "key" do - it "has a nil key when none is present" do - expect(message.key).to be_nil - end - - context "present key" do - let(:key) { "key content" } - - it "has a key" do - expect(message.key).to eq "key content" - end - end - end - - it "has an offset" do - expect(message.offset).to eq 100 - end - - describe "#timestamp" do - context "without a timestamp" do - before do - allow(Rdkafka::Bindings).to receive(:rd_kafka_message_timestamp).and_return(-1) - end - - it "has a nil timestamp if not present" do - expect(message.timestamp).to be_nil - end - end - - context "with a timestamp" do - before do - allow(Rdkafka::Bindings).to receive(:rd_kafka_message_timestamp).and_return(1505069646250) - end - - it "has a timestamp if present" do - expect(message.timestamp).to eq Time.at(1505069646, 250_000) - end - end - end - - describe "#to_s" do - before do - allow(message).to receive(:timestamp).and_return(1000) - end - - it "has a human readable representation" do - expect(message.to_s).to eq "<Message in 'topic_name' with key '', payload '', partition 3, offset 100, timestamp 1000>" - end - - context "with key and payload" do - let(:key) { "key" } - let(:payload) { "payload" } - - it "has a human readable representation" do - expect(message.to_s).to eq "<Message in 'topic_name' with key 'key', payload 'payload', partition 3, offset 100, timestamp 1000>" - end - end - - context "with a very long key and payload" do - let(:key) { "k" * 100_000 } - let(:payload) { "p" * 100_000 } - - it "has a human readable representation" do - expect(message.to_s).to eq "<Message in 'topic_name' with key '#{"k" * 40}...', payload '#{"p" * 40}...', partition 3, offset 100, timestamp 1000>" - end - end - end -end 
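One FFI detail worth noting in the hand-built message above: the struct only stores the raw address of each FFI::MemoryPointer, so the Ruby pointer object must stay referenced for as long as the struct is read, or the GC may reclaim the backing memory. A condensed sketch of the pattern (same bindings as the spec, illustrative only, not part of the patch):

  require "ffi"

  payload = "payload content"
  msg = Rdkafka::Bindings::Message.new
  ptr = FFI::MemoryPointer.new(:char, payload.bytesize)
  ptr.put_bytes(0, payload)
  msg[:payload] = ptr          # the struct keeps the address only...
  msg[:len] = payload.bytesize # ...so keep `ptr` alive while `msg` is in use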
diff --git a/spec/lib/rdkafka/consumer/partition_spec.rb b/spec/lib/rdkafka/consumer/partition_spec.rb deleted file mode 100644 index 2082d044..00000000 --- a/spec/lib/rdkafka/consumer/partition_spec.rb +++ /dev/null @@ -1,58 +0,0 @@ -# frozen_string_literal: true - -RSpec.describe Rdkafka::Consumer::Partition do - let(:partition) { described_class.new(1, offset, err) } - - let(:offset) { 100 } - let(:err) { 0 } - - it "has a partition" do - expect(partition.partition).to eq 1 - end - - it "has an offset" do - expect(partition.offset).to eq 100 - end - - it "has an err code" do - expect(partition.err).to eq 0 - end - - describe "#to_s" do - it "returns a human readable representation" do - expect(partition.to_s).to eq "<Partition 1 offset=100>" - end - end - - describe "#inspect" do - it "returns a human readable representation" do - expect(partition.inspect).to eq "<Partition 1 offset=100>" - end - - context "without offset" do - let(:offset) { nil } - - it "returns a human readable representation" do - expect(partition.inspect).to eq "<Partition 1>" - end - end - - context "with err code" do - let(:err) { 1 } - - it "returns a human readable representation" do - expect(partition.inspect).to eq "<Partition 1 offset=100 err=1>" - end - end - end - - describe "#==" do - it "equals another partition with the same content" do - expect(partition).to eq described_class.new(1, 100) - end - - it "does not equal another partition with different content" do - expect(partition).not_to eq described_class.new(2, 101) - end - end -end diff --git a/spec/lib/rdkafka/defaults_spec.rb b/spec/lib/rdkafka/defaults_spec.rb deleted file mode 100644 index a5196b64..00000000 --- a/spec/lib/rdkafka/defaults_spec.rb +++ /dev/null @@ -1,113 +0,0 @@ -# frozen_string_literal: true - -RSpec.describe Rdkafka::Defaults do - describe "consumer timeouts" do - it "defines CONSUMER_COMMITTED_TIMEOUT_MS as 2000" do - expect(described_class::CONSUMER_COMMITTED_TIMEOUT_MS).to eq(2_000) - end - - it "defines CONSUMER_QUERY_WATERMARK_TIMEOUT_MS as 1000" do - expect(described_class::CONSUMER_QUERY_WATERMARK_TIMEOUT_MS).to eq(1_000) - end - - it "defines CONSUMER_LAG_TIMEOUT_MS as 1000" do - expect(described_class::CONSUMER_LAG_TIMEOUT_MS).to eq(1_000) - end - - it "defines CONSUMER_OFFSETS_FOR_TIMES_TIMEOUT_MS as 1000" do - expect(described_class::CONSUMER_OFFSETS_FOR_TIMES_TIMEOUT_MS).to eq(1_000) - end - - it "defines CONSUMER_POLL_TIMEOUT_MS as 250" do - expect(described_class::CONSUMER_POLL_TIMEOUT_MS).to eq(250) - end - end - - describe "producer timeouts" do - it "defines PRODUCER_FLUSH_TIMEOUT_MS as 5000" do - expect(described_class::PRODUCER_FLUSH_TIMEOUT_MS).to eq(5_000) - end - - it "defines PRODUCER_PURGE_FLUSH_TIMEOUT_MS as 100" do - expect(described_class::PRODUCER_PURGE_FLUSH_TIMEOUT_MS).to eq(100) - end - end - - describe "metadata timeouts" do - it "defines METADATA_TIMEOUT_MS as 2000" do - expect(described_class::METADATA_TIMEOUT_MS).to eq(2_000) - end - end - - describe "handle timeouts" do - it "defines HANDLE_WAIT_TIMEOUT_MS as 60000" do - expect(described_class::HANDLE_WAIT_TIMEOUT_MS).to eq(60_000) - end - end - - describe "native kafka polling" do - it "defines NATIVE_KAFKA_POLL_TIMEOUT_MS as 100" do - expect(described_class::NATIVE_KAFKA_POLL_TIMEOUT_MS).to eq(100) - end - end - - describe "internal timing" do - it "defines PRODUCER_PURGE_SLEEP_INTERVAL_MS as 1" do - expect(described_class::PRODUCER_PURGE_SLEEP_INTERVAL_MS).to eq(1) - end - - it "defines NATIVE_KAFKA_SYNCHRONIZE_SLEEP_INTERVAL_MS as 10" do - expect(described_class::NATIVE_KAFKA_SYNCHRONIZE_SLEEP_INTERVAL_MS).to eq(10) - end - - it "defines METADATA_RETRY_BACKOFF_BASE_MS as 100" do - expect(described_class::METADATA_RETRY_BACKOFF_BASE_MS).to eq(100) - end - - it "defines METADATA_MAX_RETRIES as 10" do - expect(described_class::METADATA_MAX_RETRIES).to eq(10) - end - - it "defines CONSUMER_SEEK_TIMEOUT_MS as 0" do - expect(described_class::CONSUMER_SEEK_TIMEOUT_MS).to eq(0) - end - - it "defines CONSUMER_EVENTS_POLL_TIMEOUT_MS as 0" do - expect(described_class::CONSUMER_EVENTS_POLL_TIMEOUT_MS).to eq(0) - end - end - - describe "cache settings" do - it "defines PARTITIONS_COUNT_CACHE_TTL_MS as 30000" do - 
expect(described_class::PARTITIONS_COUNT_CACHE_TTL_MS).to eq(30_000) - end - end - - describe "immutability" do - it "all constants are frozen" do - # Numeric constants are inherently immutable in Ruby, but let's verify - # that they are properly defined as constants (not class variables) - constants = described_class.constants - expect(constants).not_to be_empty - expect(constants).to include( - :CONSUMER_COMMITTED_TIMEOUT_MS, - :CONSUMER_QUERY_WATERMARK_TIMEOUT_MS, - :CONSUMER_LAG_TIMEOUT_MS, - :CONSUMER_OFFSETS_FOR_TIMES_TIMEOUT_MS, - :CONSUMER_POLL_TIMEOUT_MS, - :PRODUCER_FLUSH_TIMEOUT_MS, - :PRODUCER_PURGE_FLUSH_TIMEOUT_MS, - :METADATA_TIMEOUT_MS, - :HANDLE_WAIT_TIMEOUT_MS, - :NATIVE_KAFKA_POLL_TIMEOUT_MS, - :PRODUCER_PURGE_SLEEP_INTERVAL_MS, - :NATIVE_KAFKA_SYNCHRONIZE_SLEEP_INTERVAL_MS, - :METADATA_RETRY_BACKOFF_BASE_MS, - :METADATA_MAX_RETRIES, - :CONSUMER_SEEK_TIMEOUT_MS, - :CONSUMER_EVENTS_POLL_TIMEOUT_MS, - :PARTITIONS_COUNT_CACHE_TTL_MS - ) - end - end -end diff --git a/spec/lib/rdkafka/error_spec.rb b/spec/lib/rdkafka/error_spec.rb deleted file mode 100644 index 87382298..00000000 --- a/spec/lib/rdkafka/error_spec.rb +++ /dev/null @@ -1,132 +0,0 @@ -# frozen_string_literal: true - -RSpec.describe Rdkafka::RdkafkaError do - it "raises a type error for a nil response" do - expect { - described_class.new(nil) - }.to raise_error TypeError - end - - it "creates an error with a message prefix" do - expect(described_class.new(10, "message prefix").message_prefix).to eq "message prefix" - end - - it "creates an error with a broker message" do - expect(described_class.new(10, broker_message: "broker message").broker_message).to eq "broker message" - end - - it "creates an error with an instance name" do - expect(described_class.new(10, instance_name: "rdkafka#producer-1").instance_name).to eq "rdkafka#producer-1" - end - - it "defaults instance_name to nil" do - expect(described_class.new(10).instance_name).to be_nil - end - - describe "#code" do - it "handles an invalid response" do - expect(described_class.new(933975).code).to eq :err_933975? - end - - it "returns error messages from rdkafka" do - expect(described_class.new(10).code).to eq :msg_size_too_large - end - - it "strips a leading underscore" do - expect(described_class.new(-191).code).to eq :partition_eof - end - end - - describe "#to_s" do - it "handles an invalid response" do - expect(described_class.new(933975).to_s).to eq "Err-933975? (err_933975?)" - end - - it "returns error messages from rdkafka" do - expect(described_class.new(10).to_s).to eq "Broker: Message size too large (msg_size_too_large)" - end - - it "adds the message prefix if present" do - expect(described_class.new(10, "Error explanation").to_s).to eq "Error explanation - Broker: Message size too large (msg_size_too_large)" - end - - it "adds the instance name if present" do - expect(described_class.new(10, instance_name: "rdkafka#producer-1").to_s).to eq "Broker: Message size too large (msg_size_too_large) [rdkafka#producer-1]" - end - - it "adds both message prefix and instance name if present" do - expect(described_class.new(10, "Error explanation", instance_name: "rdkafka#producer-1").to_s).to eq "Error explanation - Broker: Message size too large (msg_size_too_large) [rdkafka#producer-1]" - end - end - - describe "#message" do - it "handles an invalid response" do - expect(described_class.new(933975).message).to eq "Err-933975? 
(err_933975?)" - end - - it "returns error messages from rdkafka" do - expect(described_class.new(10).message).to eq "Broker: Message size too large (msg_size_too_large)" - end - - it "adds the message prefix if present" do - expect(described_class.new(10, "Error explanation").message).to eq "Error explanation - Broker: Message size too large (msg_size_too_large)" - end - - it "adds the instance name if present" do - expect(described_class.new(10, instance_name: "rdkafka#producer-1").message).to eq "Broker: Message size too large (msg_size_too_large) [rdkafka#producer-1]" - end - end - - describe "#is_partition_eof?" do - it "is false when not partition eof" do - expect(described_class.new(933975).is_partition_eof?).to be false - end - - it "is true when partition eof" do - expect(described_class.new(-191).is_partition_eof?).to be true - end - end - - describe "#==" do - let(:error) { described_class.new(10, "Error explanation") } - - it "equals another error with the same content" do - expect(error).to eq described_class.new(10, "Error explanation") - end - - it "does not equal another error with a different error code" do - expect(error).not_to eq described_class.new(20, "Error explanation") - end - - it "does not equal another error with a different message" do - expect(error).not_to eq described_class.new(10, "Different error explanation") - end - - it "does not equal another error with no message" do - expect(error).not_to eq described_class.new(10) - end - - it "does not equal another error with a different instance name" do - error_a = described_class.new(10, instance_name: "rdkafka#producer-1") - error_b = described_class.new(10, instance_name: "rdkafka#producer-2") - expect(error_a).not_to eq error_b - end - - it "equals another error with the same instance name" do - error_a = described_class.new(10, instance_name: "rdkafka#producer-1") - error_b = described_class.new(10, instance_name: "rdkafka#producer-1") - expect(error_a).to eq error_b - end - end -end - -RSpec.describe Rdkafka::LibraryLoadError do - it "is a subclass of BaseError" do - expect(described_class.new).to be_a(Rdkafka::BaseError) - end - - it "accepts a message" do - error = described_class.new("test message") - expect(error.message).to eq("test message") - end -end diff --git a/spec/lib/rdkafka/metadata_spec.rb b/spec/lib/rdkafka/metadata_spec.rb deleted file mode 100644 index b810a063..00000000 --- a/spec/lib/rdkafka/metadata_spec.rb +++ /dev/null @@ -1,81 +0,0 @@ -# frozen_string_literal: true - -require "securerandom" - -RSpec.describe Rdkafka::Metadata do - let(:config) { rdkafka_consumer_config } - let(:native_config) { config.send(:native_config) } - let(:native_kafka) { config.send(:native_kafka, native_config, :rd_kafka_consumer) } - - after do - Rdkafka::Bindings.rd_kafka_consumer_close(native_kafka) - Rdkafka::Bindings.rd_kafka_destroy(native_kafka) - end - - context "passing in a topic name" do - context "that is non-existent topic" do - let(:topic_name) { SecureRandom.uuid.to_s } - - it "raises an appropriate exception" do - expect { - described_class.new(native_kafka, topic_name) - }.to raise_exception(Rdkafka::RdkafkaError, "Broker: Unknown topic or partition (unknown_topic_or_part)") - end - end - - context "that is one of our test topics" do - let(:metadata) { described_class.new(native_kafka, topic_name) } - - let(:topic_name) { TestTopics.create(partitions: 25) } - - it "#brokers returns our single broker" do - expect(metadata.brokers.length).to eq(1) - expect(metadata.brokers[0][:broker_id]).to 
eq(1) - expect(%w[127.0.0.1 localhost]).to include(metadata.brokers[0][:broker_name]) - expect(metadata.brokers[0][:broker_port]).to eq(rdkafka_base_config[:"bootstrap.servers"].split(":").last.to_i) - end - - it "#topics returns data on our test topic" do - expect(metadata.topics.length).to eq(1) - expect(metadata.topics[0][:partition_count]).to eq(25) - expect(metadata.topics[0][:partitions].length).to eq(25) - expect(metadata.topics[0][:topic_name]).to eq(topic_name) - end - end - end - - context "not passing in a topic name" do - let(:metadata) { described_class.new(native_kafka, topic_name) } - - let(:topic_name) { nil } - let(:test_topic) { TestTopics.create } - - it "#brokers returns our single broker" do - expect(metadata.brokers.length).to eq(1) - expect(metadata.brokers[0][:broker_id]).to eq(1) - expect(%w[127.0.0.1 localhost]).to include(metadata.brokers[0][:broker_name]) - expect(metadata.brokers[0][:broker_port]).to eq(rdkafka_base_config[:"bootstrap.servers"].split(":").last.to_i) - end - - it "#topics returns data about existing topics" do - # Force topic creation before querying metadata - test_topic - result = metadata.topics.map { |topic| topic[:topic_name] } - expect(result).to include(test_topic) - end - end - - context "when a non-zero error code is returned" do - let(:topic_name) { SecureRandom.uuid.to_s } - - before do - allow(Rdkafka::Bindings).to receive(:rd_kafka_metadata).and_return(-165) - end - - it "creating the instance raises an exception" do - expect { - described_class.new(native_kafka, topic_name) - }.to raise_error(Rdkafka::RdkafkaError, /Local: Required feature not supported by broker \(unsupported_feature\)/) - end - end -end diff --git a/spec/lib/rdkafka/native_kafka_spec.rb b/spec/lib/rdkafka/native_kafka_spec.rb deleted file mode 100644 index 41870263..00000000 --- a/spec/lib/rdkafka/native_kafka_spec.rb +++ /dev/null @@ -1,178 +0,0 @@ -# frozen_string_literal: true - -RSpec.describe Rdkafka::NativeKafka do - let(:client) { described_class.new(native, run_polling_thread: true, opaque: opaque) } - - let(:config) { rdkafka_producer_config } - let(:native) { config.send(:native_kafka, config.send(:native_config), :rd_kafka_producer) } - let(:closing) { false } - let(:thread) { double(Thread) } - let(:opaque) { Rdkafka::Opaque.new } - - before do - allow(Rdkafka::Bindings).to receive(:rd_kafka_name).and_return("producer-1") - allow(Thread).to receive(:new).and_return(thread) - allow(thread).to receive(:name=).with("rdkafka.native_kafka#producer-1") - allow(thread).to receive(:[]=).with(:closing, anything) - allow(thread).to receive(:join) - allow(thread).to receive(:abort_on_exception=).with(anything) - end - - after { client.close } - - context "defaults" do - it "sets the thread name" do - expect(thread).to receive(:name=).with("rdkafka.native_kafka#producer-1") - - client - end - - it "sets the thread to abort on exception" do - expect(thread).to receive(:abort_on_exception=).with(true) - - client - end - - it "sets the thread `closing` flag to false" do - expect(thread).to receive(:[]=).with(:closing, false) - - client - end - end - - context "the polling thread" do - it "is created" do - expect(Thread).to receive(:new) - - client - end - end - - it "exposes the inner client" do - client.with_inner do |inner| - expect(inner).to eq(native) - end - end - - context "when client was not yet closed (`nil`)" do - it "is not closed" do - expect(client.closed?).to be(false) - end - - context "and attempt to close" do - it "calls the `destroy` binding" do - 
expect(Rdkafka::Bindings).to receive(:rd_kafka_destroy).with(native).and_call_original - - client.close - end - - it "indicates to the polling thread that it is closing" do - expect(thread).to receive(:[]=).with(:closing, true) - - client.close - end - - it "joins the polling thread" do - expect(thread).to receive(:join) - - client.close - end - - it "closes and unassigns the native client" do - client.close - - expect(client.closed?).to be(true) - end - end - end - - context "when client was already closed" do - before { client.close } - - it "is closed" do - expect(client.closed?).to be(true) - end - - context "and attempt to close again" do - it "does not call the `destroy` binding" do - expect(Rdkafka::Bindings).not_to receive(:rd_kafka_destroy_flags) - - client.close - end - - it "does not indicate to the polling thread that it is closing" do - expect(thread).not_to receive(:[]=).with(:closing, true) - - client.close - end - - it "does not join the polling thread" do - expect(thread).not_to receive(:join) - - client.close - end - - it "does not close and unassign the native client again" do - client.close - - expect(client.closed?).to be(true) - end - end - end - - it "provides a finalizer that closes the native kafka client" do - expect(client.closed?).to be(false) - - client.finalizer.call("some-ignored-object-id") - - expect(client.closed?).to be(true) - end -end - 
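The queue IO-event hooks exercised below exist so a fiber scheduler or event loop can sleep on a file descriptor instead of busy-polling. A sketch of the intended wiring (not part of the patch; it uses the admin-level wrappers `enable_queue_io_events` and `events_poll_nb_each` shown in the admin specs, and assumes librdkafka writes its payload byte to the fd whenever the queue gains events):

  require "io/wait"

  signal_r, signal_w = IO.pipe
  admin = config.admin(run_polling_thread: false)
  admin.enable_queue_io_events(signal_w.fileno)

  while signal_r.wait_readable(5)       # block until librdkafka signals, 5s timeout
    signal_r.read_nonblock(1)           # drain the wake-up byte
    admin.events_poll_nb_each { |_count| } # returns once the queue is drained
  end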
} - - it "allows IO events when polling thread is not active" do - signal_r, signal_w = IO.pipe - - expect { client.enable_main_queue_io_events(signal_w.fileno) }.not_to raise_error - expect { client.enable_background_queue_io_events(signal_w.fileno) }.not_to raise_error - - signal_r.close - signal_w.close - end - - it "accepts custom payload for IO events" do - signal_r, signal_w = IO.pipe - payload = "custom" - - expect { client.enable_main_queue_io_events(signal_w.fileno, payload) }.not_to raise_error - - signal_r.close - signal_w.close - end - - context "when client is closed" do - before { client.close } - - it "raises ClosedInnerError when enabling main_queue_io_events" do - signal_r, signal_w = IO.pipe - expect { client.enable_main_queue_io_events(signal_w.fileno) }.to raise_error(Rdkafka::ClosedInnerError) - signal_r.close - signal_w.close - end - - it "raises ClosedInnerError when enabling background_queue_io_events" do - signal_r, signal_w = IO.pipe - expect { client.enable_background_queue_io_events(signal_w.fileno) }.to raise_error(Rdkafka::ClosedInnerError) - signal_r.close - signal_w.close - end - end -end diff --git a/spec/lib/rdkafka/producer/delivery_handle_spec.rb b/spec/lib/rdkafka/producer/delivery_handle_spec.rb deleted file mode 100644 index 1a795a1d..00000000 --- a/spec/lib/rdkafka/producer/delivery_handle_spec.rb +++ /dev/null @@ -1,60 +0,0 @@ -# frozen_string_literal: true - -RSpec.describe Rdkafka::Producer::DeliveryHandle do - let(:handle) do - described_class.new.tap do |handle| - handle[:pending] = pending_handle - handle[:response] = response - handle[:partition] = 2 - handle[:offset] = 100 - handle.topic = TestTopics.unique - end - end - - let(:response) { 0 } - - describe "#wait" do - let(:pending_handle) { true } - - it "waits until the timeout and then raise an error" do - expect { - handle.wait(max_wait_timeout_ms: 100) - }.to raise_error Rdkafka::Producer::DeliveryHandle::WaitTimeoutError, /delivery/ - end - - context "when not pending anymore and no error" do - let(:pending_handle) { false } - - it "returns a delivery report" do - report = handle.wait - - expect(report.partition).to eq(2) - expect(report.offset).to eq(100) - expect(report.topic_name).to eq(handle.topic) - end - - it "waits without a timeout" do - report = handle.wait(max_wait_timeout_ms: nil) - - expect(report.partition).to eq(2) - expect(report.offset).to eq(100) - expect(report.topic_name).to eq(handle.topic) - end - end - end - - describe "#create_result" do - let(:pending_handle) { false } - let(:report) { handle.create_result } - - context "when response is 0" do - it { expect(report.error).to be_nil } - end - - context "when response is not 0" do - let(:response) { 1 } - - it { expect(report.error).to eq(Rdkafka::RdkafkaError.new(response)) } - end - end -end diff --git a/spec/lib/rdkafka/producer/delivery_report_spec.rb b/spec/lib/rdkafka/producer/delivery_report_spec.rb deleted file mode 100644 index 4e51ce72..00000000 --- a/spec/lib/rdkafka/producer/delivery_report_spec.rb +++ /dev/null @@ -1,27 +0,0 @@ -# frozen_string_literal: true - -RSpec.describe Rdkafka::Producer::DeliveryReport do - let(:report) { described_class.new(2, 100, topic_name, -1) } - - let(:topic_name) { TestTopics.unique } - - it "gets the partition" do - expect(report.partition).to eq 2 - end - - it "gets the offset" do - expect(report.offset).to eq 100 - end - - it "gets the topic_name" do - expect(report.topic_name).to eq topic_name - end - - it "gets the same topic name under topic alias" do - 
expect(report.topic).to eq topic_name - end - - it "gets the error" do - expect(report.error).to eq(-1) - end -end diff --git a/spec/lib/rdkafka/producer/partitions_count_cache_spec.rb b/spec/lib/rdkafka/producer/partitions_count_cache_spec.rb deleted file mode 100644 index 1165117c..00000000 --- a/spec/lib/rdkafka/producer/partitions_count_cache_spec.rb +++ /dev/null @@ -1,413 +0,0 @@ -# frozen_string_literal: true - -require "spec_helper" - -RSpec.describe Rdkafka::Producer::PartitionsCountCache do - let(:default_ttl_ms) { 1_000 } # Reduced from 30000 to speed up tests - let(:custom_ttl_ms) { 500 } # Half the default TTL - let(:cache) { described_class.new(ttl_ms: default_ttl_ms) } - let(:custom_ttl_cache) { described_class.new(ttl_ms: custom_ttl_ms) } - let(:topic) { TestTopics.unique } - let(:topic2) { TestTopics.unique } - let(:partition_count) { 5 } - let(:higher_partition_count) { 10 } - let(:lower_partition_count) { 3 } - let(:even_higher_partition_count) { 15 } - - describe "#initialize" do - it "creates a cache with default TTL when no TTL is specified" do - standard_cache = described_class.new - expect(standard_cache).to be_a(described_class) - end - - it "creates a cache with custom TTL when specified" do - expect(custom_ttl_cache).to be_a(described_class) - end - - context "backwards compatibility with ttl (seconds)" do - it "works with old ttl parameter (emits deprecation warning to stderr)" do - # Note: Deprecation warning is emitted but not tested here due to RSpec stderr capture complexity - old_style_cache = described_class.new(1) # 1 second - expect(old_style_cache).to be_a(described_class) - end - - it "converts seconds to milliseconds correctly" do - old_style_cache = described_class.new(2) # 2 seconds = 2000ms - - # Set a value and verify the TTL behavior - old_style_cache.set(topic, partition_count) - - # Wait 1.5 seconds (should still be valid as TTL is 2 seconds) - sleep(1.5) - result = old_style_cache.get(topic) { fail "Should not be called - cache should still be valid" } - expect(result).to eq(partition_count) - - # Wait another 0.7 seconds (total 2.2 seconds, should be expired) - sleep(0.7) - block_called = false - new_result = old_style_cache.get(topic) do - block_called = true - partition_count + 1 - end - expect(block_called).to be true - expect(new_result).to eq(partition_count + 1) - end - - it "accepts both ttl and ttl_ms parameters" do - cache_instance = described_class.new(1, ttl_ms: 1000) - expect(cache_instance).to be_a(described_class) - end - - it "uses ttl_ms when both parameters are provided" do - # ttl: 10 would be 10000ms, but ttl_ms: 500 should take precedence - both_params_cache = described_class.new(10, ttl_ms: 500) - - both_params_cache.set(topic, partition_count) - - # Wait 0.6 seconds (past 500ms TTL but not past 10 seconds) - sleep(0.6) - - # Should be expired because ttl_ms: 500 takes precedence - block_called = false - both_params_cache.get(topic) do - block_called = true - partition_count + 1 - end - - expect(block_called).to be true - end - end - end - - describe "#get" do - context "when cache is empty" do - it "yields to get the value and caches it" do - block_called = false - result = cache.get(topic) do - block_called = true - partition_count - end - - expect(block_called).to be true - expect(result).to eq(partition_count) - - # Verify caching by checking if block is called again - second_block_called = false - second_result = cache.get(topic) do - second_block_called = true - partition_count + 1 # Different value to ensure we get 
cached value - end - - expect(second_block_called).to be false - expect(second_result).to eq(partition_count) - end - end - - context "when cache has a value" do - before do - # Seed the cache with a value - cache.get(topic) { partition_count } - end - - it "returns cached value without yielding if not expired" do - block_called = false - result = cache.get(topic) do - block_called = true - partition_count + 1 # Different value to ensure we get cached one - end - - expect(block_called).to be false - expect(result).to eq(partition_count) - end - - it "yields to get new value when TTL has expired" do - # Wait for TTL to expire (convert ms to seconds) - sleep(default_ttl_ms / 1000.0 + 0.1) - - block_called = false - new_count = partition_count + 1 - result = cache.get(topic) do - block_called = true - new_count - end - - expect(block_called).to be true - expect(result).to eq(new_count) - - # Verify the new value is cached - second_block_called = false - second_result = cache.get(topic) do - second_block_called = true - new_count + 1 # Different value again - end - - expect(second_block_called).to be false - expect(second_result).to eq(new_count) - end - - it "respects a custom TTL" do - # Seed the custom TTL cache with a value - custom_ttl_cache.get(topic) { partition_count } - - # Wait for custom TTL to expire but not default TTL (convert ms to seconds) - sleep(custom_ttl_ms / 1000.0 + 0.1) - - # Custom TTL cache should refresh - custom_block_called = false - custom_result = custom_ttl_cache.get(topic) do - custom_block_called = true - higher_partition_count - end - - expect(custom_block_called).to be true - expect(custom_result).to eq(higher_partition_count) - - # Default TTL cache should not refresh yet - default_block_called = false - default_result = cache.get(topic) do - default_block_called = true - higher_partition_count - end - - expect(default_block_called).to be false - expect(default_result).to eq(partition_count) - end - end - - context "when new value is obtained" do - before do - # Seed the cache with initial value - cache.get(topic) { partition_count } - end - - it "updates cache when new value is higher than cached value" do - # Wait for TTL to expire (convert ms to seconds) - sleep(default_ttl_ms / 1000.0 + 0.1) - - # Get higher value - result = cache.get(topic) { higher_partition_count } - expect(result).to eq(higher_partition_count) - - # Verify it was cached - second_result = cache.get(topic) { fail "Should not be called" } - expect(second_result).to eq(higher_partition_count) - end - - it "preserves higher cached value when new value is lower" do - # First update to higher value (convert ms to seconds) - sleep(default_ttl_ms / 1000.0 + 0.1) - cache.get(topic) { higher_partition_count } - - # Then try to update to lower value (convert ms to seconds) - sleep(default_ttl_ms / 1000.0 + 0.1) - result = cache.get(topic) { lower_partition_count } - - expect(result).to eq(higher_partition_count) - - # and subsequent gets should return the previously cached higher value - second_result = cache.get(topic) { fail "Should not be called" } - expect(second_result).to eq(higher_partition_count) - end - - it "handles multiple topics independently" do - # Set up both topics with different values - cache.get(topic) { partition_count } - cache.get(topic2) { higher_partition_count } - - # Wait for TTL to expire (convert ms to seconds) - sleep(default_ttl_ms / 1000.0 + 0.1) - - # Update first topic - first_result = cache.get(topic) { even_higher_partition_count } - expect(first_result).to 
eq(even_higher_partition_count) - - # Update second topic independently - second_updated = higher_partition_count + 3 - second_result = cache.get(topic2) { second_updated } - expect(second_result).to eq(second_updated) - - # Both topics should have their updated values - expect(cache.get(topic) { fail "Should not be called" }).to eq(even_higher_partition_count) - expect(cache.get(topic2) { fail "Should not be called" }).to eq(second_updated) - end - end - end - - describe "#set" do - context "when cache is empty" do - it "adds a new entry to the cache" do - cache.set(topic, partition_count) - - # Verify through get - result = cache.get(topic) { fail "Should not be called" } - expect(result).to eq(partition_count) - end - end - - context "when cache already has a value" do - before do - cache.set(topic, partition_count) - end - - it "updates cache when new value is higher" do - cache.set(topic, higher_partition_count) - - result = cache.get(topic) { fail "Should not be called" } - expect(result).to eq(higher_partition_count) - end - - it "keeps original value when new value is lower" do - cache.set(topic, lower_partition_count) - - result = cache.get(topic) { fail "Should not be called" } - expect(result).to eq(partition_count) - end - - it "updates the timestamp even when keeping original value" do - # Set initial value - cache.set(topic, partition_count) - - # Wait until close to TTL expiring (convert ms to seconds) - sleep(default_ttl_ms / 1000.0 - 0.2) - - # Set lower value (should update timestamp but not value) - cache.set(topic, lower_partition_count) - - # Wait a bit more, but still under the full TTL if timestamp was refreshed - sleep(0.3) - - # Should still be valid due to timestamp refresh - result = cache.get(topic) { fail "Should not be called" } - expect(result).to eq(partition_count) - end - end - - context "with concurrent access" do - it "correctly handles simultaneous updates to the same topic" do - # This test focuses on the final value after concurrent updates - threads = [] - - # Create 5 threads that all try to update the same topic with increasing values - 5.times do |i| - threads << Thread.new do - value = 10 + i # Start at 10 to ensure all are higher than initial value - cache.set(topic, value) - end - end - - # Wait for all threads to complete - threads.each(&:join) - - # The highest value (14) should be stored and accessible through get - result = cache.get(topic) { fail "Should not be called" } - expect(result).to eq(14) - end - end - end - - describe "TTL behavior" do - it "treats entries as expired when they exceed TTL" do - # Set initial value - cache.get(topic) { partition_count } - - # Wait just under TTL (convert ms to seconds) - sleep(default_ttl_ms / 1000.0 - 0.2) - - # Value should still be cached (block should not be called) - result = cache.get(topic) { fail "Should not be called when cache is valid" } - expect(result).to eq(partition_count) - - # Now wait to exceed TTL - sleep(0.3) # Total sleep is now default_ttl_ms / 1000.0 + 0.1 - - # Cache should be expired, block should be called - block_called = false - new_value = partition_count + 3 - result = cache.get(topic) do - block_called = true - new_value - end - - expect(block_called).to be true - expect(result).to eq(new_value) - end - end - - describe "comprehensive scenarios" do - it "handles a full lifecycle of cache operations" do - # 1. Initial cache miss, fetch and store - result1 = cache.get(topic) { partition_count } - expect(result1).to eq(partition_count) - - # 2. 
Cache hit - result2 = cache.get(topic) { fail "Should not be called" } - expect(result2).to eq(partition_count) - - # 3. Attempt to set lower value - cache.set(topic, lower_partition_count) - result3 = cache.get(topic) { fail "Should not be called" } - # Should still return the higher original value - expect(result3).to eq(partition_count) - - # 4. Set higher value - cache.set(topic, higher_partition_count) - result4 = cache.get(topic) { fail "Should not be called" } - expect(result4).to eq(higher_partition_count) - - # 5. TTL expires, new value provided is lower (convert ms to seconds) - sleep(default_ttl_ms / 1000.0 + 0.1) - result5 = cache.get(topic) { lower_partition_count } - # This returns the highest value - expect(result5).to eq(higher_partition_count) - - # 6. But subsequent get should return the higher cached value - result6 = cache.get(topic) { fail "Should not be called" } - expect(result6).to eq(higher_partition_count) - - # 7. Set new highest value directly - even_higher = higher_partition_count + 5 - cache.set(topic, even_higher) - result7 = cache.get(topic) { fail "Should not be called" } - expect(result7).to eq(even_higher) - end - - it "handles multiple topics with different TTLs correctly" do - # Set up initial values - cache.get(topic) { partition_count } - custom_ttl_cache.get(topic) { partition_count } - - # Wait past custom TTL but not default TTL (convert ms to seconds) - sleep(custom_ttl_ms / 1000.0 + 0.1) - - # Default cache should NOT refresh (still within default TTL) - default_result = cache.get(topic) { fail "Should not be called for default cache" } - # Original value should be maintained - expect(default_result).to eq(partition_count) - - # Custom TTL cache SHOULD refresh (past custom TTL) - custom_cache_value = partition_count + 8 - custom_block_called = false - custom_result = custom_ttl_cache.get(topic) do - custom_block_called = true - custom_cache_value - end - - expect(custom_block_called).to be true - expect(custom_result).to eq(custom_cache_value) - - # Now wait past default TTL (convert ms to seconds) - sleep((default_ttl_ms - custom_ttl_ms) / 1000.0 + 0.1) - - # Now default cache should also refresh - default_block_called = false - new_default_value = partition_count + 10 - new_default_result = cache.get(topic) do - default_block_called = true - new_default_value - end - - expect(default_block_called).to be true - expect(new_default_result).to eq(new_default_value) - end - end -end diff --git a/spec/spec_helper.rb b/spec/spec_helper.rb deleted file mode 100644 index 409b8e13..00000000 --- a/spec/spec_helper.rb +++ /dev/null @@ -1,76 +0,0 @@ -# frozen_string_literal: true - -Warning[:performance] = true if RUBY_VERSION >= "3.3" -Warning[:deprecated] = true -$VERBOSE = true - -require "warning" - -Warning.process do |warning| - next unless warning.include?(Dir.pwd) - # Allow OpenStruct usage only in specs - next if warning.include?("OpenStruct use") && warning.include?("_spec") - - raise "Warning in your code: #{warning}" -end - -unless ENV["CI"] == "true" - require "simplecov" - SimpleCov.start do - add_filter "/spec/" - end -end - -require "pry" -require "rspec" -require "rdkafka" -require "timeout" -require "securerandom" - -# Load support modules -require_relative "support/kafka_config_helpers" -require_relative "support/kafka_wait_helpers" -require_relative "support/native_client_helpers" -require_relative "support/test_topics" - -RSpec.configure do |config| - config.disable_monkey_patching! 
- - config.include KafkaConfigHelpers - config.include KafkaWaitHelpers - config.include NativeClientHelpers - - config.filter_run focus: true - config.run_all_when_everything_filtered = true - - config.before do - Rdkafka::Config.statistics_callback = nil - Rdkafka::Config.error_callback = nil - Rdkafka::Config.oauthbearer_token_refresh_callback = nil - # We need to clear it so state does not leak between specs - Rdkafka::Producer.partitions_count_cache.to_h.clear - end - - config.before(:suite) do - admin = KafkaConfigHelpers.rdkafka_config.admin - { - TestTopics.example_topic => 1 - }.each do |topic, partitions| - create_topic_handle = admin.create_topic(topic, partitions, 1) - begin - create_topic_handle.wait(max_wait_timeout_ms: 1_000) - rescue Rdkafka::RdkafkaError => ex - raise unless ex.message.match?(/topic_already_exists/) - end - end - admin.close - end - - config.around do |example| - # Timeout specs after 1.5 minute. If they take longer - # they are probably stuck - Timeout.timeout(90) do - example.run - end - end -end diff --git a/test/integrations/librdkafka_admin_features_test.rb b/test/integrations/librdkafka_admin_features_test.rb new file mode 100644 index 00000000..0cca7d7a --- /dev/null +++ b/test/integrations/librdkafka_admin_features_test.rb @@ -0,0 +1,83 @@ +# frozen_string_literal: true + +# This integration test verifies that librdkafka admin is compiled with all expected builtin features. +# These features are critical for Karafka and rdkafka-ruby to function properly. + +require_relative "../test_helper" +require "stringio" + +# Expected features that should be present in our compiled librdkafka +ADMIN_EXPECTED_BUILTIN_FEATURES = %w[ + gzip + snappy + ssl + sasl + regex + lz4 + sasl_plain + sasl_scram + plugins + zstd + sasl_oauthbearer +].freeze + +# Precompiled builds include GSSAPI (via MIT Kerberos + Cyrus SASL) +ADMIN_PRECOMPILED_FEATURES = (ADMIN_EXPECTED_BUILTIN_FEATURES + %w[sasl_gssapi]).freeze + +describe "Librdkafka Admin Features" do + before do + @captured_output = StringIO.new + @logger = Logger.new(@captured_output) + @logger.level = Logger::DEBUG + + @original_logger = Rdkafka::Config.logger + Rdkafka::Config.logger = @logger + Rdkafka::Config.ensure_log_thread + + config = Rdkafka::Config.new( + "bootstrap.servers": "localhost:9092", + "client.id": "admin-feature-test", + debug: "all" + ) + + @admin = config.admin + + # Wait for log messages to be processed + sleep 2 + end + + after do + @admin&.close + Rdkafka::Config.logger = @original_logger + end + + it "includes all expected builtin features in admin client logs" do + log_content = @captured_output.string + + # Find the initialization line that contains builtin.features + feature_line = log_content.lines.find { |line| line.include?("builtin.features") } + + refute_nil feature_line, "Could not find builtin.features in admin log output" + + # Extract the features list from the line + # Format: "... 
(builtin.features gzip,snappy,ssl,..., ...)" + match = feature_line.match(/builtin\.features\s+([^,]+(?:,[^,\s]+)*)/i) + + refute_nil match, "Could not parse builtin.features from log line: #{feature_line}" + + features_string = match[1] + actual_features = features_string.split(",").map(&:strip) + + # Verify all expected features are present + expected = if ENV["RDKAFKA_PRECOMPILED"] == "true" + ADMIN_PRECOMPILED_FEATURES + else + ADMIN_EXPECTED_BUILTIN_FEATURES + end + + missing_features = expected - actual_features + + assert_empty missing_features, + "Missing expected builtin features: #{missing_features.join(', ')}" + end +end diff --git a/test/integrations/librdkafka_consumer_features_test.rb b/test/integrations/librdkafka_consumer_features_test.rb new file mode 100644 index 00000000..55bfa051 --- /dev/null +++ b/test/integrations/librdkafka_consumer_features_test.rb @@ -0,0 +1,84 @@ +# frozen_string_literal: true + +# This integration test verifies that librdkafka consumer is compiled with all expected builtin features. +# These features are critical for Karafka and rdkafka-ruby to function properly. + +require_relative "../test_helper" +require "stringio" + +# Expected features that should be present in our compiled librdkafka +CONSUMER_EXPECTED_BUILTIN_FEATURES = %w[ + gzip + snappy + ssl + sasl + regex + lz4 + sasl_plain + sasl_scram + plugins + zstd + sasl_oauthbearer +].freeze + +# Precompiled builds include GSSAPI (via MIT Kerberos + Cyrus SASL) +CONSUMER_PRECOMPILED_FEATURES = (CONSUMER_EXPECTED_BUILTIN_FEATURES + %w[sasl_gssapi]).freeze + +describe "Librdkafka Consumer Features" do + before do + @captured_output = StringIO.new + @logger = Logger.new(@captured_output) + @logger.level = Logger::DEBUG + + @original_logger = Rdkafka::Config.logger + Rdkafka::Config.logger = @logger + Rdkafka::Config.ensure_log_thread + + config = Rdkafka::Config.new( + "bootstrap.servers": "localhost:9092", + "client.id": "consumer-feature-test", + "group.id": "feature-test-group", + debug: "all" + ) + + @consumer = config.consumer + + # Wait for log messages to be processed + sleep 2 + end + + after do + @consumer&.close + Rdkafka::Config.logger = @original_logger + end + + it "includes all expected builtin features in consumer client logs" do + log_content = @captured_output.string + + # Find the initialization line that contains builtin.features + feature_line = log_content.lines.find { |line| line.include?("builtin.features") } + + refute_nil feature_line, "Could not find builtin.features in consumer log output" + + # Extract the features list from the line + # Format: "... 
(builtin.features gzip,snappy,ssl,..., ...)" + match = feature_line.match(/builtin\.features\s+([^,]+(?:,[^,\s]+)*)/i) + + refute_nil match, "Could not parse builtin.features from log line: #{feature_line}" + + features_string = match[1] + actual_features = features_string.split(",").map(&:strip) + + # Verify all expected features are present + expected = if ENV["RDKAFKA_PRECOMPILED"] == "true" + CONSUMER_PRECOMPILED_FEATURES + else + CONSUMER_EXPECTED_BUILTIN_FEATURES + end + + missing_features = expected - actual_features + + assert_empty missing_features, + "Missing expected builtin features: #{missing_features.join(', ')}" + end +end diff --git a/test/integrations/librdkafka_producer_features_test.rb b/test/integrations/librdkafka_producer_features_test.rb new file mode 100644 index 00000000..2339bfee --- /dev/null +++ b/test/integrations/librdkafka_producer_features_test.rb @@ -0,0 +1,83 @@ +# frozen_string_literal: true + +# This integration test verifies that librdkafka producer is compiled with all expected builtin features. +# These features are critical for Karafka and rdkafka-ruby to function properly. + +require_relative "../test_helper" +require "stringio" + +# Expected features that should be present in our compiled librdkafka +PRODUCER_EXPECTED_BUILTIN_FEATURES = %w[ + gzip + snappy + ssl + sasl + regex + lz4 + sasl_plain + sasl_scram + plugins + zstd + sasl_oauthbearer +].freeze + +# Precompiled builds include GSSAPI (via MIT Kerberos + Cyrus SASL) +PRODUCER_PRECOMPILED_FEATURES = (PRODUCER_EXPECTED_BUILTIN_FEATURES + %w[sasl_gssapi]).freeze + +describe "Librdkafka Producer Features" do + before do + @captured_output = StringIO.new + @logger = Logger.new(@captured_output) + @logger.level = Logger::DEBUG + + @original_logger = Rdkafka::Config.logger + Rdkafka::Config.logger = @logger + Rdkafka::Config.ensure_log_thread + + config = Rdkafka::Config.new( + "bootstrap.servers": "localhost:9092", + "client.id": "producer-feature-test", + debug: "all" + ) + + @producer = config.producer + + # Wait for log messages to be processed + sleep 2 + end + + after do + @producer&.close + Rdkafka::Config.logger = @original_logger + end + + it "includes all expected builtin features in producer client logs" do + log_content = @captured_output.string + + # Find the initialization line that contains builtin.features + feature_line = log_content.lines.find { |line| line.include?("builtin.features") } + + refute_nil feature_line, "Could not find builtin.features in producer log output" + + # Extract the features list from the line + # Format: "... 
(builtin.features gzip,snappy,ssl,..., ...)" + match = feature_line.match(/builtin\.features\s+([^,]+(?:,[^,\s]+)*)/i) + + refute_nil match, "Could not parse builtin.features from log line: #{feature_line}" + + features_string = match[1] + actual_features = features_string.split(",").map(&:strip) + + # Verify all expected features are present + expected = if ENV["RDKAFKA_PRECOMPILED"] == "true" + PRODUCER_PRECOMPILED_FEATURES + else + PRODUCER_EXPECTED_BUILTIN_FEATURES + end + + missing_features = expected - actual_features + + assert_empty missing_features, + "Missing expected builtin features: #{missing_features.join(', ')}" + end +end diff --git a/test/integrations/ssl_stress_test.rb b/test/integrations/ssl_stress_test.rb new file mode 100644 index 00000000..04e95847 --- /dev/null +++ b/test/integrations/ssl_stress_test.rb @@ -0,0 +1,126 @@ +# frozen_string_literal: true + +# This integration test is designed to stress-test the OpenSSL SSL/TLS layer under high concurrency +# to help detect regressions like the one described in OpenSSL issue #28171: +# https://github.com/openssl/openssl/issues/28171 +# +# Issue summary: +# - OpenSSL 3.0.17 introduced a concurrency-related regression. +# - Multiple threads sharing the same SSL_CTX and making parallel TLS connections +# (often with certificate verification enabled) can cause segmentation faults +# due to race conditions in X509 store handling. +# - Affected users include Python (httpx), Rust (reqwest, native-tls), and C applications. +# +# Script details: +# - Starts 100 SSL servers using self-signed, in-memory certs on sequential localhost ports. +# - Uses `rdkafka-ruby` to spin up 100 consumer threads that continuously create and destroy +# SSL connections to these servers for a given duration. +# - This mimics high TLS connection churn and aims to trigger latent SSL_CTX or X509_STORE +# threading bugs like double-frees, memory corruption, or segmentation faults. +# +# Goal: +# - Catch regressions early by validating that heavy concurrent SSL use does not lead to crashes. +# - Provide a minimal and repeatable reproducer when diagnosing OpenSSL-level SSL instability. 
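+#
+# For orientation, the underlying race can be approximated without Kafka at all.
+# A hypothetical sketch (not executed by this test; the port and counts are arbitrary):
+# many threads handshaking while sharing one SSLContext, and therefore one
+# underlying SSL_CTX / X509_STORE:
+#
+#   ctx = OpenSSL::SSL::SSLContext.new
+#   ctx.verify_mode = OpenSSL::SSL::VERIFY_PEER
+#
+#   Array.new(100) do
+#     Thread.new do
+#       100.times do
+#         sock = TCPSocket.new("127.0.0.1", 19093)
+#         ssl = OpenSSL::SSL::SSLSocket.new(sock, ctx) # shared context across threads
+#         ssl.connect rescue nil # handshake failures are fine; the churn is the point
+#         ssl.close rescue nil
+#         sock.close
+#       end
+#     end
+#   end.each(&:join)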
+#
+# In case of a failure, a segfault will happen
+
+require_relative "../test_helper"
+require "socket"
+require "openssl"
+
+describe "SSL Stress Test" do
+  before do
+    @starting_port = 19093
+    @num_ports = 150
+    @batches = 100
+    @ports = @starting_port...(@starting_port + @num_ports)
+
+    @config = {
+      "bootstrap.servers": Array.new(@num_ports) { |i| "127.0.0.1:#{@starting_port + i}" }.join(","),
+      "security.protocol": "SSL",
+      "enable.ssl.certificate.verification": false
+    }
+
+    # Generate an in-memory self-signed cert
+    @key = OpenSSL::PKey::RSA.new(2048)
+
+    name = OpenSSL::X509::Name.parse("/CN=127.0.0.1")
+    @cert = OpenSSL::X509::Certificate.new
+    @cert.version = 2
+    @cert.serial = 1
+    @cert.subject = name
+    @cert.issuer = name
+    @cert.public_key = @key.public_key
+    @cert.not_before = Time.now
+    @cert.not_after = Time.now + 3600
+    @cert.sign(@key, OpenSSL::Digest.new("SHA256"))
+
+    # Start servers on multiple ports
+    @server_threads = @ports.map do |port|
+      Thread.new do
+        # Prepare the SSL context
+        # We do not use a shared context for the server because the goal is to stress the
+        # librdkafka layer and not the Ruby SSL layer
+        ssl_context = OpenSSL::SSL::SSLContext.new
+        ssl_context.cert = @cert
+        ssl_context.key = @key
+
+        tcp_server = TCPServer.new("127.0.0.1", port)
+        ssl_server = OpenSSL::SSL::SSLServer.new(tcp_server, ssl_context)
+
+        loop do
+          ssl_socket = ssl_server.accept
+          ssl_socket.close
+        rescue => e
+          # Some errors are expected and irrelevant
+          next if e.message.include?("unexpected eof while reading")
+        end
+      end
+    end
+
+    # Wait for the servers to be available
+    # We make sure they are reachable so that librdkafka actually hammers them
+    timeout = 30
+    start = Time.now
+
+    loop do
+      all_up = @ports.all? do |port|
+        TCPSocket.new("127.0.0.1", port).close
+        true
+      rescue
+        false
+      end
+
+      break if all_up
+
+      raise "Timeout waiting for SSL servers" if Time.now - start > timeout
+
+      sleep 0.1
+    end
+  end
+
+  after do
+    @server_threads&.each(&:kill)
+  end
+
+  it "does not crash under heavy concurrent SSL connection churn" do
+    start_time = Time.now
+    duration = 60 * 10 # 10 minutes - it should crash faster than that if SSL is vulnerable
+
+    while Time.now - start_time < duration
+      consumers = Array.new(@batches) do
+        Rdkafka::Config.new(@config).consumer
+      end
+
+      # This print is needed. No idea why, but it increases the chances of a segfault
+      $stdout.print ""
+
+      sleep(1)
+      consumers.each(&:close)
+    end
+
+    # If we reach here without a segfault, the test passes
+    pass
+  end
+end
diff --git a/test/integrations/unregistered_scheme_file_test.rb b/test/integrations/unregistered_scheme_file_test.rb
new file mode 100644
index 00000000..d3a02102
--- /dev/null
+++ b/test/integrations/unregistered_scheme_file_test.rb
@@ -0,0 +1,117 @@
+# frozen_string_literal: true
+
+# This integration test verifies that rdkafka properly detects and reports specific SSL
+# configuration errors when attempting to connect to an SSL-enabled Kafka broker.
+#
+# It also ensures that we do not statically link SSL certs into an incorrect tmp cert location.
+#
+# These errors occur when rdkafka's underlying OpenSSL library encounters issues
+# with SSL certificate validation, particularly related to file scheme handling
+# and missing certificate directories.
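+#
+# As a debugging aid, Ruby's openssl bindings expose the paths the linked OpenSSL
+# was compiled to search (a reference sketch, assuming a standard openssl build;
+# the example paths vary by platform):
+#
+#   require "openssl"
+#   OpenSSL::X509::DEFAULT_CERT_FILE # e.g. "/usr/lib/ssl/cert.pem"
+#   OpenSSL::X509::DEFAULT_CERT_DIR  # e.g. "/usr/lib/ssl/certs"
+#
+# If librdkafka is linked against an OpenSSL whose baked-in paths point at a
+# build-time temporary directory, certificate loading fails with exactly the
+# errors this test guards against.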
+ +require_relative "../test_helper" +require "socket" +require "openssl" +require "stringio" + +describe "Unregistered Scheme File" do + before do + @captured_output = StringIO.new + @original_logger = Rdkafka::Config.logger + Rdkafka::Config.logger = Logger.new(@captured_output) + + # Start a dummy SSL server with self-signed certificate + @ssl_server_thread = Thread.new do + # Create TCP server + tcp_server = TCPServer.new("localhost", 9099) + + # Generate self-signed certificate + key = OpenSSL::PKey::RSA.new(2048) + cert = OpenSSL::X509::Certificate.new + cert.version = 2 + cert.serial = 1 + cert.subject = OpenSSL::X509::Name.parse("/DC=org/DC=ruby-test/CN=localhost") + cert.issuer = cert.subject + cert.public_key = key.public_key + cert.not_before = Time.now + cert.not_after = cert.not_before + 365 * 24 * 60 * 60 # 1 year + + # Add extensions + ef = OpenSSL::X509::ExtensionFactory.new + ef.subject_certificate = cert + ef.issuer_certificate = cert + cert.add_extension(ef.create_extension("basicConstraints", "CA:TRUE", true)) + cert.add_extension(ef.create_extension("keyUsage", "keyCertSign, cRLSign", true)) + cert.add_extension(ef.create_extension("subjectKeyIdentifier", "hash", false)) + cert.add_extension(ef.create_extension("authorityKeyIdentifier", "keyid:always", false)) + + cert.sign(key, OpenSSL::Digest.new("SHA256")) + + # Create SSL context + ssl_context = OpenSSL::SSL::SSLContext.new + ssl_context.cert = cert + ssl_context.key = key + + # Wrap TCP server with SSL + ssl_server = OpenSSL::SSL::SSLServer.new(tcp_server, ssl_context) + + loop do + client = ssl_server.accept + client.puts("Invalid Kafka broker") + client.close + rescue + # Ignore SSL server errors - they're expected + end + rescue + # Ignore thread-level errors + end + + # Give the server time to start + sleep 1 + + config = Rdkafka::Config.new( + "bootstrap.servers": "localhost:9099", + "security.protocol": "SSL", + "client.id": "test-client", + "group.id": "test-group" + ) + + @consumer = config.consumer + end + + after do + @consumer&.close + @ssl_server_thread&.kill + Rdkafka::Config.logger = @original_logger + end + + it "does not produce unregistered scheme or missing file errors in SSL logs" do + @consumer.subscribe("test-topic") + + # Try to poll for messages - this triggers SSL errors + start_time = Time.now + timeout = 5 + + while Time.now - start_time < timeout + begin + @consumer.poll(1000) + rescue + break + end + end + + # Wait for rdkafka to finish logging errors + sleep 2 + + # Check captured logs for target error patterns + @captured_output.rewind + log_lines = @captured_output.readlines + + log_lines.each do |line| + refute_includes line, "routines::unregistered scheme", + "Found 'unregistered scheme' error in SSL logs" + refute_includes line, "system library::No such file or directory", + "Found 'No such file or directory' error in SSL logs" + end + end +end diff --git a/test/lib/rdkafka/abstract_handle_test.rb b/test/lib/rdkafka/abstract_handle_test.rb new file mode 100644 index 00000000..57ef09cd --- /dev/null +++ b/test/lib/rdkafka/abstract_handle_test.rb @@ -0,0 +1,179 @@ +# frozen_string_literal: true + +require_relative "../../test_helper" + +class BadTestHandle < Rdkafka::AbstractHandle + layout :pending, :bool, + :response, :int +end + +class TestHandle < Rdkafka::AbstractHandle + layout :pending, :bool, + :response, :int, + :result, :int + + def operation_name + "test_operation" + end + + def create_result + self[:result] + end +end + +describe Rdkafka::AbstractHandle do + context "A 
subclass that does not implement the required methods" do + it "raises an exception if operation_name is called" do + e = assert_raises(RuntimeError) { BadTestHandle.new.operation_name } + assert_match(/Must be implemented by subclass!/, e.message) + end + + it "raises an exception if create_result is called" do + e = assert_raises(RuntimeError) { BadTestHandle.new.create_result } + assert_match(/Must be implemented by subclass!/, e.message) + end + end + + describe ".register and .remove" do + before do + @handle = TestHandle.new.tap do |handle| + handle[:pending] = true + handle[:response] = 0 + handle[:result] = -1 + end + end + + it "registers and remove a delivery handle" do + described_class.register(@handle) + removed = described_class.remove(@handle.to_ptr.address) + assert_equal @handle, removed + assert_empty Rdkafka::AbstractHandle::REGISTRY + end + end + + describe "#pending?" do + context "when true" do + before do + @handle = TestHandle.new.tap do |handle| + handle[:pending] = true + handle[:response] = 0 + handle[:result] = -1 + end + end + + it "is true" do + assert_equal true, @handle.pending? + end + end + + context "when not true" do + before do + @handle = TestHandle.new.tap do |handle| + handle[:pending] = false + handle[:response] = 0 + handle[:result] = -1 + end + end + + it "is false" do + assert_equal false, @handle.pending? + end + end + end + + describe "#wait" do + context "when pending_handle true" do + before do + @handle = TestHandle.new.tap do |handle| + handle[:pending] = true + handle[:response] = 0 + handle[:result] = -1 + end + end + + it "waits until the timeout and then raise an error" do + e = assert_raises(Rdkafka::AbstractHandle::WaitTimeoutError) { + @handle.wait(max_wait_timeout_ms: 100) + } + assert_match(/test_operation/, e.message) + end + end + + context "when pending_handle false" do + context "without error" do + before do + @handle = TestHandle.new.tap do |handle| + handle[:pending] = false + handle[:response] = 0 + handle[:result] = 1 + end + end + + it "returns a result" do + wait_result = @handle.wait + assert_equal 1, wait_result + end + + it "waits without a timeout" do + wait_result = @handle.wait(max_wait_timeout_ms: nil) + assert_equal 1, wait_result + end + end + + context "with error" do + before do + @handle = TestHandle.new.tap do |handle| + handle[:pending] = false + handle[:response] = 20 + handle[:result] = -1 + end + end + + it "raises an rdkafka error" do + assert_raises(Rdkafka::RdkafkaError) { @handle.wait } + end + end + + context "backwards compatibility with max_wait_timeout (seconds)" do + before do + @handle = TestHandle.new.tap do |handle| + handle[:pending] = false + handle[:response] = 0 + handle[:result] = 42 + end + end + + it "works with max_wait_timeout (emits deprecation warning to stderr)" do + wait_result = @handle.wait(max_wait_timeout: 5) + assert_equal 42, wait_result + end + + it "works with max_wait_timeout set to nil (wait forever)" do + wait_result = @handle.wait(max_wait_timeout: nil) + assert_equal 42, wait_result + end + + it "properly converts seconds to milliseconds" do + @handle[:pending] = true + e = assert_raises(Rdkafka::AbstractHandle::WaitTimeoutError) { + @handle.wait(max_wait_timeout: 0.1) + } + assert_match(/100 ms/, e.message) + end + + it "uses new parameter when both are provided" do + wait_result = @handle.wait(max_wait_timeout: 1, max_wait_timeout_ms: 5000) + assert_equal 42, wait_result + end + + it "timeouts based on max_wait_timeout_ms when both are provided" do + 
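+        # The millisecond-based argument takes precedence, so the wait should time
+        # out after 100 ms rather than after the deprecated seconds-based value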
@handle[:pending] = true + e = assert_raises(Rdkafka::AbstractHandle::WaitTimeoutError) { + @handle.wait(max_wait_timeout: 10, max_wait_timeout_ms: 100) + } + assert_match(/100 ms/, e.message) + end + end + end + end +end diff --git a/test/lib/rdkafka/admin/create_acl_handle_test.rb b/test/lib/rdkafka/admin/create_acl_handle_test.rb new file mode 100644 index 00000000..bb57e8d4 --- /dev/null +++ b/test/lib/rdkafka/admin/create_acl_handle_test.rb @@ -0,0 +1,65 @@ +# frozen_string_literal: true + +require_relative "../../../test_helper" + +describe Rdkafka::Admin::CreateAclHandle do + before do + @response = Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR + end + + describe "#wait" do + context "when pending" do + before do + @handle = described_class.new.tap do |handle| + handle[:pending] = true + handle[:response] = @response + handle[:response_string] = FFI::MemoryPointer.from_string("") + end + end + + it "waits until the timeout and then raise an error" do + e = assert_raises(Rdkafka::Admin::CreateAclHandle::WaitTimeoutError) { + @handle.wait(max_wait_timeout_ms: 100) + } + assert_match(/create acl/, e.message) + end + end + + context "when not pending anymore and no error" do + before do + @handle = described_class.new.tap do |handle| + handle[:pending] = false + handle[:response] = @response + handle[:response_string] = FFI::MemoryPointer.from_string("") + end + end + + it "returns a create acl report" do + report = @handle.wait + + assert_equal "", report.rdkafka_response_string + end + + it "waits without a timeout" do + report = @handle.wait(max_wait_timeout_ms: nil) + + assert_equal "", report.rdkafka_response_string + end + end + end + + describe "#raise_error" do + before do + @handle = described_class.new.tap do |handle| + handle[:pending] = false + handle[:response] = @response + handle[:response_string] = FFI::MemoryPointer.from_string("") + end + end + + it "raises the appropriate error" do + e = assert_raises(Rdkafka::RdkafkaError) { @handle.raise_error } + assert_match(/Success \(no_error\)/, e.message) + end + end +end diff --git a/spec/lib/rdkafka/admin/create_acl_report_spec.rb b/test/lib/rdkafka/admin/create_acl_report_test.rb similarity index 52% rename from spec/lib/rdkafka/admin/create_acl_report_spec.rb rename to test/lib/rdkafka/admin/create_acl_report_test.rb index ffe511c5..ba8cdcf7 100644 --- a/spec/lib/rdkafka/admin/create_acl_report_spec.rb +++ b/test/lib/rdkafka/admin/create_acl_report_test.rb @@ -1,18 +1,20 @@ # frozen_string_literal: true -RSpec.describe Rdkafka::Admin::CreateAclReport do - let(:report) { - described_class.new( +require_relative "../../../test_helper" + +describe Rdkafka::Admin::CreateAclReport do + before do + @report = described_class.new( rdkafka_response: Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR, rdkafka_response_string: FFI::MemoryPointer.from_string("") ) - } + end it "gets RD_KAFKA_RESP_ERR_NO_ERROR" do - expect(report.rdkafka_response).to eq(0) + assert_equal 0, @report.rdkafka_response end it "gets empty string" do - expect(report.rdkafka_response_string).to eq("") + assert_equal "", @report.rdkafka_response_string end end diff --git a/test/lib/rdkafka/admin/create_topic_handle_test.rb b/test/lib/rdkafka/admin/create_topic_handle_test.rb new file mode 100644 index 00000000..b1b61fd8 --- /dev/null +++ b/test/lib/rdkafka/admin/create_topic_handle_test.rb @@ -0,0 +1,70 @@ +# frozen_string_literal: true + +require_relative "../../../test_helper" + +describe Rdkafka::Admin::CreateTopicHandle do + before do + @topic_name = 
TestTopics.unique + end + + describe "#wait" do + context "when pending" do + before do + @handle = described_class.new.tap do |handle| + handle[:pending] = true + handle[:response] = 0 + handle[:error_string] = FFI::Pointer::NULL + handle[:result_name] = FFI::MemoryPointer.from_string(@topic_name) + end + end + + it "waits until the timeout and then raise an error" do + e = assert_raises(Rdkafka::Admin::CreateTopicHandle::WaitTimeoutError) { + @handle.wait(max_wait_timeout_ms: 100) + } + assert_match(/create topic/, e.message) + end + end + + context "when not pending anymore and no error" do + before do + @handle = described_class.new.tap do |handle| + handle[:pending] = false + handle[:response] = 0 + handle[:error_string] = FFI::Pointer::NULL + handle[:result_name] = FFI::MemoryPointer.from_string(@topic_name) + end + end + + it "returns a create topic report" do + report = @handle.wait + + assert_nil report.error_string + assert_equal @topic_name, report.result_name + end + + it "waits without a timeout" do + report = @handle.wait(max_wait_timeout_ms: nil) + + assert_nil report.error_string + assert_equal @topic_name, report.result_name + end + end + end + + describe "#raise_error" do + before do + @handle = described_class.new.tap do |handle| + handle[:pending] = false + handle[:response] = 0 + handle[:error_string] = FFI::Pointer::NULL + handle[:result_name] = FFI::MemoryPointer.from_string(@topic_name) + end + end + + it "raises the appropriate error" do + e = assert_raises(Rdkafka::RdkafkaError) { @handle.raise_error } + assert_match(/Success \(no_error\)/, e.message) + end + end +end diff --git a/test/lib/rdkafka/admin/create_topic_report_test.rb b/test/lib/rdkafka/admin/create_topic_report_test.rb new file mode 100644 index 00000000..669a1f76 --- /dev/null +++ b/test/lib/rdkafka/admin/create_topic_report_test.rb @@ -0,0 +1,20 @@ +# frozen_string_literal: true + +require_relative "../../../test_helper" + +describe Rdkafka::Admin::CreateTopicReport do + before do + @report = described_class.new( + FFI::MemoryPointer.from_string("error string"), + FFI::MemoryPointer.from_string("result name") + ) + end + + it "gets the error string" do + assert_equal "error string", @report.error_string + end + + it "gets the result name" do + assert_equal "result name", @report.result_name + end +end diff --git a/test/lib/rdkafka/admin/delete_acl_handle_test.rb b/test/lib/rdkafka/admin/delete_acl_handle_test.rb new file mode 100644 index 00000000..a2e3c3c8 --- /dev/null +++ b/test/lib/rdkafka/admin/delete_acl_handle_test.rb @@ -0,0 +1,88 @@ +# frozen_string_literal: true + +require_relative "../../../test_helper" + +describe Rdkafka::Admin::DeleteAclHandle do + before do + @resource_name = TestTopics.unique + @resource_type = Rdkafka::Bindings::RD_KAFKA_RESOURCE_TOPIC + @resource_pattern_type = Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_LITERAL + @principal = "User:anonymous" + @host = "*" + @operation = Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_READ + @permission_type = Rdkafka::Bindings::RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW + @response = Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR + end + + def build_handle(pending:) + error_buffer = FFI::MemoryPointer.from_string(" " * 256) + delete_acl_ptr = Rdkafka::Bindings.rd_kafka_AclBinding_new( + @resource_type, + FFI::MemoryPointer.from_string(@resource_name), + @resource_pattern_type, + FFI::MemoryPointer.from_string(@principal), + FFI::MemoryPointer.from_string(@host), + @operation, + @permission_type, + error_buffer, + 256 + ) + if 
delete_acl_ptr.null? + raise Rdkafka::Config::ConfigError.new(error_buffer.read_string) + end + pointer_array = [delete_acl_ptr] + delete_acls_array_ptr = FFI::MemoryPointer.new(:pointer) + delete_acls_array_ptr.write_array_of_pointer(pointer_array) + described_class.new.tap do |handle| + handle[:pending] = pending + handle[:response] = @response + handle[:response_string] = FFI::MemoryPointer.from_string("") + handle[:matching_acls] = delete_acls_array_ptr + handle[:matching_acls_count] = 1 + end + end + + describe "#wait" do + context "when pending" do + before do + @handle = build_handle(pending: true) + end + + it "waits until the timeout and then raise an error" do + e = assert_raises(Rdkafka::Admin::DeleteAclHandle::WaitTimeoutError) { + @handle.wait(max_wait_timeout_ms: 100) + } + assert_match(/delete acl/, e.message) + end + end + + context "when not pending anymore and no error" do + before do + @handle = build_handle(pending: false) + end + + it "returns a delete acl report" do + report = @handle.wait + + assert_equal 1, report.deleted_acls.length + end + + it "waits without a timeout" do + report = @handle.wait(max_wait_timeout_ms: nil) + + assert_equal @resource_name, report.deleted_acls[0].matching_acl_resource_name + end + end + end + + describe "#raise_error" do + before do + @handle = build_handle(pending: false) + end + + it "raises the appropriate error" do + e = assert_raises(Rdkafka::RdkafkaError) { @handle.raise_error } + assert_match(/Success \(no_error\)/, e.message) + end + end +end diff --git a/test/lib/rdkafka/admin/delete_acl_report_test.rb b/test/lib/rdkafka/admin/delete_acl_report_test.rb new file mode 100644 index 00000000..11883cb1 --- /dev/null +++ b/test/lib/rdkafka/admin/delete_acl_report_test.rb @@ -0,0 +1,64 @@ +# frozen_string_literal: true + +require_relative "../../../test_helper" + +describe Rdkafka::Admin::DeleteAclReport do + before do + @resource_name = TestTopics.unique + @resource_type = Rdkafka::Bindings::RD_KAFKA_RESOURCE_TOPIC + @resource_pattern_type = Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_LITERAL + @principal = "User:anonymous" + @host = "*" + @operation = Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_READ + @permission_type = Rdkafka::Bindings::RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW + + error_buffer = FFI::MemoryPointer.from_string(" " * 256) + delete_acl_ptr = Rdkafka::Bindings.rd_kafka_AclBinding_new( + @resource_type, + FFI::MemoryPointer.from_string(@resource_name), + @resource_pattern_type, + FFI::MemoryPointer.from_string(@principal), + FFI::MemoryPointer.from_string(@host), + @operation, + @permission_type, + error_buffer, + 256 + ) + if delete_acl_ptr.null? 
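+      # A NULL binding pointer means librdkafka rejected the ACL arguments;
+      # error_buffer then holds the human-readable reason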
+ raise Rdkafka::Config::ConfigError.new(error_buffer.read_string) + end + pointer_array = [delete_acl_ptr] + delete_acls_array_ptr = FFI::MemoryPointer.new(:pointer) + delete_acls_array_ptr.write_array_of_pointer(pointer_array) + @report = described_class.new(matching_acls: delete_acls_array_ptr, matching_acls_count: 1) + end + + it "gets deleted acl resource type as Rdkafka::Bindings::RD_KAFKA_RESOURCE_TOPIC" do + assert_equal Rdkafka::Bindings::RD_KAFKA_RESOURCE_TOPIC, @report.deleted_acls[0].matching_acl_resource_type + end + + it "gets deleted acl resource name" do + assert_equal @resource_name, @report.deleted_acls[0].matching_acl_resource_name + end + + it "gets deleted acl resource pattern type as Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_LITERAL" do + assert_equal Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_LITERAL, @report.deleted_acls[0].matching_acl_resource_pattern_type + assert_equal Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_LITERAL, @report.deleted_acls[0].matching_acl_pattern_type + end + + it "gets deleted acl principal as User:anonymous" do + assert_equal "User:anonymous", @report.deleted_acls[0].matching_acl_principal + end + + it "gets deleted acl host as *" do + assert_equal "*", @report.deleted_acls[0].matching_acl_host + end + + it "gets deleted acl operation as Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_READ" do + assert_equal Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_READ, @report.deleted_acls[0].matching_acl_operation + end + + it "gets deleted acl permission_type as Rdkafka::Bindings::RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW" do + assert_equal Rdkafka::Bindings::RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, @report.deleted_acls[0].matching_acl_permission_type + end +end diff --git a/test/lib/rdkafka/admin/delete_topic_handle_test.rb b/test/lib/rdkafka/admin/delete_topic_handle_test.rb new file mode 100644 index 00000000..29bc987e --- /dev/null +++ b/test/lib/rdkafka/admin/delete_topic_handle_test.rb @@ -0,0 +1,70 @@ +# frozen_string_literal: true + +require_relative "../../../test_helper" + +describe Rdkafka::Admin::DeleteTopicHandle do + before do + @topic_name = TestTopics.unique + end + + describe "#wait" do + context "when pending" do + before do + @handle = described_class.new.tap do |handle| + handle[:pending] = true + handle[:response] = 0 + handle[:error_string] = FFI::Pointer::NULL + handle[:result_name] = FFI::MemoryPointer.from_string(@topic_name) + end + end + + it "waits until the timeout and then raise an error" do + e = assert_raises(Rdkafka::Admin::DeleteTopicHandle::WaitTimeoutError) { + @handle.wait(max_wait_timeout_ms: 100) + } + assert_match(/delete topic/, e.message) + end + end + + context "when not pending anymore and no error" do + before do + @handle = described_class.new.tap do |handle| + handle[:pending] = false + handle[:response] = 0 + handle[:error_string] = FFI::Pointer::NULL + handle[:result_name] = FFI::MemoryPointer.from_string(@topic_name) + end + end + + it "returns a delete topic report" do + report = @handle.wait + + assert_nil report.error_string + assert_equal @topic_name, report.result_name + end + + it "waits without a timeout" do + report = @handle.wait(max_wait_timeout_ms: nil) + + assert_nil report.error_string + assert_equal @topic_name, report.result_name + end + end + end + + describe "#raise_error" do + before do + @handle = described_class.new.tap do |handle| + handle[:pending] = false + handle[:response] = -1 + handle[:error_string] = FFI::Pointer::NULL + handle[:result_name] = FFI::MemoryPointer.from_string(@topic_name) + 
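+        # The response of -1 set above is RD_KAFKA_RESP_ERR_UNKNOWN, which is
+        # rendered as "Unknown broker error (unknown)" when raised below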
end + end + + it "raises the appropriate error" do + e = assert_raises(Rdkafka::RdkafkaError) { @handle.raise_error } + assert_match(/Unknown broker error \(unknown\)/, e.message) + end + end +end diff --git a/test/lib/rdkafka/admin/delete_topic_report_test.rb b/test/lib/rdkafka/admin/delete_topic_report_test.rb new file mode 100644 index 00000000..e86c8d45 --- /dev/null +++ b/test/lib/rdkafka/admin/delete_topic_report_test.rb @@ -0,0 +1,20 @@ +# frozen_string_literal: true + +require_relative "../../../test_helper" + +describe Rdkafka::Admin::DeleteTopicReport do + before do + @report = described_class.new( + FFI::MemoryPointer.from_string("error string"), + FFI::MemoryPointer.from_string("result name") + ) + end + + it "gets the error string" do + assert_equal "error string", @report.error_string + end + + it "gets the result name" do + assert_equal "result name", @report.result_name + end +end diff --git a/test/lib/rdkafka/admin/describe_acl_handle_test.rb b/test/lib/rdkafka/admin/describe_acl_handle_test.rb new file mode 100644 index 00000000..19b69fa3 --- /dev/null +++ b/test/lib/rdkafka/admin/describe_acl_handle_test.rb @@ -0,0 +1,88 @@ +# frozen_string_literal: true + +require_relative "../../../test_helper" + +describe Rdkafka::Admin::DescribeAclHandle do + before do + @resource_name = TestTopics.unique + @resource_type = Rdkafka::Bindings::RD_KAFKA_RESOURCE_TOPIC + @resource_pattern_type = Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_LITERAL + @principal = "User:anonymous" + @host = "*" + @operation = Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_READ + @permission_type = Rdkafka::Bindings::RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW + @response = Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR + end + + def build_handle(pending:) + error_buffer = FFI::MemoryPointer.from_string(" " * 256) + describe_acl_ptr = Rdkafka::Bindings.rd_kafka_AclBinding_new( + @resource_type, + FFI::MemoryPointer.from_string(@resource_name), + @resource_pattern_type, + FFI::MemoryPointer.from_string(@principal), + FFI::MemoryPointer.from_string(@host), + @operation, + @permission_type, + error_buffer, + 256 + ) + if describe_acl_ptr.null? 
+ raise Rdkafka::Config::ConfigError.new(error_buffer.read_string) + end + pointer_array = [describe_acl_ptr] + describe_acls_array_ptr = FFI::MemoryPointer.new(:pointer) + describe_acls_array_ptr.write_array_of_pointer(pointer_array) + described_class.new.tap do |handle| + handle[:pending] = pending + handle[:response] = @response + handle[:response_string] = FFI::MemoryPointer.from_string("") + handle[:acls] = describe_acls_array_ptr + handle[:acls_count] = 1 + end + end + + describe "#wait" do + context "when pending" do + before do + @handle = build_handle(pending: true) + end + + it "waits until the timeout and then raise an error" do + e = assert_raises(Rdkafka::Admin::DescribeAclHandle::WaitTimeoutError) { + @handle.wait(max_wait_timeout_ms: 100) + } + assert_match(/describe acl/, e.message) + end + end + + context "when not pending anymore and no error" do + before do + @handle = build_handle(pending: false) + end + + it "returns a describe acl report" do + report = @handle.wait + + assert_equal 1, report.acls.length + end + + it "waits without a timeout" do + report = @handle.wait(max_wait_timeout_ms: nil) + + assert_equal @resource_name, report.acls[0].matching_acl_resource_name + end + end + end + + describe "#raise_error" do + before do + @handle = build_handle(pending: false) + end + + it "raises the appropriate error" do + e = assert_raises(Rdkafka::RdkafkaError) { @handle.raise_error } + assert_match(/Success \(no_error\)/, e.message) + end + end +end diff --git a/test/lib/rdkafka/admin/describe_acl_report_test.rb b/test/lib/rdkafka/admin/describe_acl_report_test.rb new file mode 100644 index 00000000..a924a098 --- /dev/null +++ b/test/lib/rdkafka/admin/describe_acl_report_test.rb @@ -0,0 +1,64 @@ +# frozen_string_literal: true + +require_relative "../../../test_helper" + +describe Rdkafka::Admin::DescribeAclReport do + before do + @resource_name = TestTopics.unique + @resource_type = Rdkafka::Bindings::RD_KAFKA_RESOURCE_TOPIC + @resource_pattern_type = Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_LITERAL + @principal = "User:anonymous" + @host = "*" + @operation = Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_READ + @permission_type = Rdkafka::Bindings::RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW + + error_buffer = FFI::MemoryPointer.from_string(" " * 256) + describe_acl_ptr = Rdkafka::Bindings.rd_kafka_AclBinding_new( + @resource_type, + FFI::MemoryPointer.from_string(@resource_name), + @resource_pattern_type, + FFI::MemoryPointer.from_string(@principal), + FFI::MemoryPointer.from_string(@host), + @operation, + @permission_type, + error_buffer, + 256 + ) + if describe_acl_ptr.null? 
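+      # As with the other binding helpers, NULL signals invalid ACL arguments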
+ raise Rdkafka::Config::ConfigError.new(error_buffer.read_string) + end + pointer_array = [describe_acl_ptr] + describe_acls_array_ptr = FFI::MemoryPointer.new(:pointer) + describe_acls_array_ptr.write_array_of_pointer(pointer_array) + @report = described_class.new(acls: describe_acls_array_ptr, acls_count: 1) + end + + it "gets matching acl resource type as Rdkafka::Bindings::RD_KAFKA_RESOURCE_TOPIC" do + assert_equal Rdkafka::Bindings::RD_KAFKA_RESOURCE_TOPIC, @report.acls[0].matching_acl_resource_type + end + + it "gets matching acl resource name" do + assert_equal @resource_name, @report.acls[0].matching_acl_resource_name + end + + it "gets matching acl resource pattern type as Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_LITERAL" do + assert_equal Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_LITERAL, @report.acls[0].matching_acl_resource_pattern_type + assert_equal Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_LITERAL, @report.acls[0].matching_acl_pattern_type + end + + it "gets matching acl principal as User:anonymous" do + assert_equal "User:anonymous", @report.acls[0].matching_acl_principal + end + + it "gets matching acl host as *" do + assert_equal "*", @report.acls[0].matching_acl_host + end + + it "gets matching acl operation as Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_READ" do + assert_equal Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_READ, @report.acls[0].matching_acl_operation + end + + it "gets matching acl permission_type as Rdkafka::Bindings::RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW" do + assert_equal Rdkafka::Bindings::RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, @report.acls[0].matching_acl_permission_type + end +end diff --git a/test/lib/rdkafka/admin/list_offsets_handle_test.rb b/test/lib/rdkafka/admin/list_offsets_handle_test.rb new file mode 100644 index 00000000..c6aad2da --- /dev/null +++ b/test/lib/rdkafka/admin/list_offsets_handle_test.rb @@ -0,0 +1,72 @@ +# frozen_string_literal: true + +require_relative "../../../test_helper" + +describe Rdkafka::Admin::ListOffsetsHandle do + before do + @response = Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR + end + + describe "#wait" do + context "when pending" do + before do + @handle = described_class.new.tap do |handle| + handle[:pending] = true + handle[:response] = @response + handle[:response_string] = FFI::MemoryPointer.from_string("") + handle[:result_infos] = FFI::Pointer::NULL + handle[:result_count] = 0 + end + end + + it "waits until the timeout and then raises an error" do + e = assert_raises(Rdkafka::Admin::ListOffsetsHandle::WaitTimeoutError) { + @handle.wait(max_wait_timeout_ms: 100) + } + assert_match(/list offsets/, e.message) + end + end + + context "when not pending anymore and no error" do + before do + @handle = described_class.new.tap do |handle| + handle[:pending] = false + handle[:response] = @response + handle[:response_string] = FFI::MemoryPointer.from_string("") + handle[:result_infos] = FFI::Pointer::NULL + handle[:result_count] = 0 + end + end + + it "returns a list offsets report" do + report = @handle.wait + + assert_kind_of Rdkafka::Admin::ListOffsetsReport, report + assert_equal [], report.offsets + end + + it "waits without a timeout" do + report = @handle.wait(max_wait_timeout_ms: nil) + + assert_equal [], report.offsets + end + end + end + + describe "#raise_error" do + before do + @handle = described_class.new.tap do |handle| + handle[:pending] = false + handle[:response] = @response + handle[:response_string] = FFI::MemoryPointer.from_string("") + handle[:result_infos] = FFI::Pointer::NULL + 
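+        # A NULL result_infos pointer with a zero count models an empty result set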
handle[:result_count] = 0 + end + end + + it "raises the appropriate error" do + e = assert_raises(Rdkafka::RdkafkaError) { @handle.raise_error } + assert_match(/Success \(no_error\)/, e.message) + end + end +end diff --git a/test/lib/rdkafka/admin/list_offsets_report_test.rb b/test/lib/rdkafka/admin/list_offsets_report_test.rb new file mode 100644 index 00000000..c8f19bdb --- /dev/null +++ b/test/lib/rdkafka/admin/list_offsets_report_test.rb @@ -0,0 +1,17 @@ +# frozen_string_literal: true + +require_relative "../../../test_helper" + +describe Rdkafka::Admin::ListOffsetsReport do + describe "#initialize" do + context "when result_infos is NULL" do + before do + @report = described_class.new(result_infos: FFI::Pointer::NULL, result_count: 0) + end + + it "returns empty offsets" do + assert_equal [], @report.offsets + end + end + end +end diff --git a/test/lib/rdkafka/admin_test.rb b/test/lib/rdkafka/admin_test.rb new file mode 100644 index 00000000..50e51a05 --- /dev/null +++ b/test/lib/rdkafka/admin_test.rb @@ -0,0 +1,1208 @@ +# frozen_string_literal: true + +require "ostruct" + +require_relative "../../test_helper" + +describe Rdkafka::Admin do + before do + @config = rdkafka_config + @topic_name = "test-topic-#{SecureRandom.uuid}" + @topic_partition_count = 3 + @topic_replication_factor = 1 + @topic_config = { "cleanup.policy" => "compact", "min.cleanable.dirty.ratio" => 0.8 } + @invalid_topic_config = { "cleeeeenup.policee" => "campact" } + @group_name = "test-group-#{SecureRandom.uuid}" + @resource_name = TestTopics.unique + @resource_type = Rdkafka::Bindings::RD_KAFKA_RESOURCE_TOPIC + @resource_pattern_type = Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_LITERAL + @principal = "User:anonymous" + @host = "*" + @operation = Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_READ + @permission_type = Rdkafka::Bindings::RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW + end + + def admin + @admin ||= @config.admin + end + + after do + # Registry should always end up being empty + assert_empty Rdkafka::Admin::CreateTopicHandle::REGISTRY + assert_empty Rdkafka::Admin::CreatePartitionsHandle::REGISTRY + assert_empty Rdkafka::Admin::DescribeAclHandle::REGISTRY + assert_empty Rdkafka::Admin::CreateAclHandle::REGISTRY + assert_empty Rdkafka::Admin::DeleteAclHandle::REGISTRY + assert_empty Rdkafka::Admin::ListOffsetsHandle::REGISTRY + admin.close + end + + describe "#describe_errors" do + it "has 172 errors" do + errors = admin.class.describe_errors + assert_equal 172, errors.size + end + + it "has the correct error for code -184" do + errors = admin.class.describe_errors + assert_equal({ code: -184, description: "Local: Queue full", name: "_QUEUE_FULL" }, errors[-184]) + end + + it "has the correct error for code 21" do + errors = admin.class.describe_errors + assert_equal({ code: 21, description: "Broker: Invalid required acks value", name: "INVALID_REQUIRED_ACKS" }, errors[21]) + end + end + + describe "admin without auto-start" do + it "expect to be able to start it later and close" do + @admin = @config.admin(native_kafka_auto_start: false) + admin.start + admin.close + end + + it "expect to be able to close it without starting" do + @admin = @config.admin(native_kafka_auto_start: false) + admin.close + end + end + + describe "#create_topic" do + describe "called with invalid input" do + describe "with an invalid topic name" do + it "raises an exception" do + topic_name = "[!@#]" + create_topic_handle = admin.create_topic(topic_name, @topic_partition_count, @topic_replication_factor) + ex = 
assert_raises(Rdkafka::RdkafkaError) do + create_topic_handle.wait(max_wait_timeout_ms: 15_000) + end + assert_kind_of Rdkafka::RdkafkaError, ex + assert_match(/Broker: Invalid topic \(topic_exception\)/, ex.message) + assert_match(/Topic name.*is invalid: .* contains one or more characters other than ASCII alphanumerics, '.', '_' and '-'/, ex.broker_message) + end + end + + describe "with the name of a topic that already exists" do + it "raises an exception" do + existing_topic_name = TestTopics.create + create_topic_handle = admin.create_topic(existing_topic_name, @topic_partition_count, @topic_replication_factor) + ex = assert_raises(Rdkafka::RdkafkaError) do + create_topic_handle.wait(max_wait_timeout_ms: 15_000) + end + assert_kind_of Rdkafka::RdkafkaError, ex + assert_match(/Broker: Topic already exists \(topic_already_exists\)/, ex.message) + assert_match(/Topic '#{Regexp.escape(existing_topic_name)}' already exists/, ex.broker_message) + end + end + + describe "with an invalid partition count" do + it "raises an exception" do + e = assert_raises(Rdkafka::Config::ConfigError) do + admin.create_topic(@topic_name, -999, @topic_replication_factor) + end + assert_match(/num_partitions out of expected range/, e.message) + end + end + + describe "with an invalid replication factor" do + it "raises an exception" do + e = assert_raises(Rdkafka::Config::ConfigError) do + admin.create_topic(@topic_name, @topic_partition_count, -2) + end + assert_match(/replication_factor out of expected range/, e.message) + end + end + + describe "with an invalid topic configuration" do + it "doesn't create the topic" do + create_topic_handle = admin.create_topic(@topic_name, @topic_partition_count, @topic_replication_factor, @invalid_topic_config) + e = assert_raises(Rdkafka::RdkafkaError) do + create_topic_handle.wait(max_wait_timeout_ms: 15_000) + end + assert_match(/Broker: Configuration is invalid \(invalid_config\)/, e.message) + end + end + end + + context "edge case" do + context "where we are unable to get the background queue" do + before do + Rdkafka::Bindings.stubs(:rd_kafka_queue_get_background).returns(FFI::Pointer::NULL) + end + + it "raises an exception" do + e = assert_raises(Rdkafka::Config::ConfigError) do + admin.create_topic(@topic_name, @topic_partition_count, @topic_replication_factor) + end + assert_match(/rd_kafka_queue_get_background was NULL/, e.message) + end + end + + context "where rd_kafka_CreateTopics raises an exception" do + before do + Rdkafka::Bindings.stubs(:rd_kafka_CreateTopics).raises(RuntimeError.new("oops")) + end + + it "raises an exception" do + e = assert_raises(RuntimeError) do + admin.create_topic(@topic_name, @topic_partition_count, @topic_replication_factor) + end + assert_match(/oops/, e.message) + end + end + end + + it "creates a topic" do + create_topic_handle = admin.create_topic(@topic_name, @topic_partition_count, @topic_replication_factor, @topic_config) + create_topic_report = create_topic_handle.wait(max_wait_timeout_ms: 15_000) + assert_nil create_topic_report.error_string + assert_equal @topic_name, create_topic_report.result_name + end + end + + describe "describe_configs" do + before do + admin.create_topic(@topic_name, 2, 1).wait + sleep(1) + end + + context "when describing config of an existing topic" do + it "returns the config" do + resources = [{ resource_type: 2, resource_name: @topic_name }] + resources_results = admin.describe_configs(resources).wait.resources + assert_equal 1, resources_results.size + assert_equal 2, 
resources_results.first.type + assert_equal @topic_name, resources_results.first.name + assert_operator resources_results.first.configs.size, :>, 25 + assert_equal "compression.type", resources_results.first.configs.first.name + assert_equal "producer", resources_results.first.configs.first.value + refute_empty resources_results.first.configs.map(&:synonyms) + end + end + + context "when describing config of a non-existing topic" do + it "expect to raise error" do + resources = [{ resource_type: 2, resource_name: SecureRandom.uuid }] + assert_raises(Rdkafka::RdkafkaError) do + admin.describe_configs(resources).wait.resources + end + end + end + + context "when describing both existing and non-existing topics" do + it "expect to raise error" do + resources = [ + { resource_type: 2, resource_name: @topic_name }, + { resource_type: 2, resource_name: SecureRandom.uuid } + ] + assert_raises(Rdkafka::RdkafkaError) do + admin.describe_configs(resources).wait.resources + end + end + end + + context "when describing multiple existing topics" do + it "returns configs for both" do + resources = [ + { resource_type: 2, resource_name: TestTopics.example_topic }, + { resource_type: 2, resource_name: @topic_name } + ] + resources_results = admin.describe_configs(resources).wait.resources + assert_equal 2, resources_results.size + assert_equal 2, resources_results.first.type + assert_equal TestTopics.example_topic, resources_results.first.name + assert_equal 2, resources_results.last.type + assert_equal @topic_name, resources_results.last.name + end + end + + context "when trying to describe invalid resource type" do + it "expect to raise error" do + resources = [{ resource_type: 0, resource_name: SecureRandom.uuid }] + assert_raises(Rdkafka::RdkafkaError) do + admin.describe_configs(resources).wait.resources + end + end + end + + context "when trying to describe invalid broker" do + it "expect to raise error" do + resources = [{ resource_type: 4, resource_name: "non-existing" }] + assert_raises(Rdkafka::RdkafkaError) do + admin.describe_configs(resources).wait.resources + end + end + end + + context "when trying to describe valid broker" do + it "returns broker config" do + resources = [{ resource_type: 4, resource_name: "1" }] + resources_results = admin.describe_configs(resources).wait.resources + assert_equal 1, resources_results.size + assert_equal 4, resources_results.first.type + assert_equal "1", resources_results.first.name + assert_operator resources_results.first.configs.size, :>, 230 + assert_equal "log.cleaner.min.compaction.lag.ms", resources_results.first.configs.first.name + assert_equal "0", resources_results.first.configs.first.value + refute_empty resources_results.first.configs.map(&:synonyms) + end + end + + context "when describing valid broker with topics in one request" do + it "returns both broker and topic configs" do + resources = [ + { resource_type: 4, resource_name: "1" }, + { resource_type: 2, resource_name: @topic_name } + ] + resources_results = admin.describe_configs(resources).wait.resources + assert_equal 2, resources_results.size + assert_equal 4, resources_results.first.type + assert_equal "1", resources_results.first.name + assert_operator resources_results.first.configs.size, :>, 230 + assert_equal "log.cleaner.min.compaction.lag.ms", resources_results.first.configs.first.name + assert_equal "0", resources_results.first.configs.first.value + assert_equal 2, resources_results.last.type + assert_equal @topic_name, resources_results.last.name + assert_operator 
resources_results.last.configs.size, :>, 25 + assert_equal "compression.type", resources_results.last.configs.first.name + assert_equal "producer", resources_results.last.configs.first.value + end + end + end + + describe "incremental_alter_configs" do + before do + admin.create_topic(@topic_name, 2, 1).wait + sleep(1) + end + + context "when altering one topic with one valid config via set" do + it "alters the config" do + target_retention = rand(86400002..86410001).to_s + resources_with_configs = [ + { + resource_type: 2, + resource_name: @topic_name, + configs: [ + { + name: "delete.retention.ms", + value: target_retention, + op_type: 0 + } + ] + } + ] + resources_results = admin.incremental_alter_configs(resources_with_configs).wait.resources + assert_equal 1, resources_results.size + assert_equal 2, resources_results.first.type + assert_equal @topic_name, resources_results.first.name + + sleep(1) + + ret_config = admin.describe_configs(resources_with_configs).wait.resources.first.configs.find do |config| + config.name == "delete.retention.ms" + end + + assert_equal target_retention, ret_config.value + end + end + + context "when altering one topic with one valid config via delete" do + it "resets the config to default" do + target_retention = rand(8640002..8650001).to_s + resources_with_configs = [ + { + resource_type: 2, + resource_name: @topic_name, + configs: [ + { + name: "delete.retention.ms", + value: target_retention, + op_type: 1 + } + ] + } + ] + resources_results = admin.incremental_alter_configs(resources_with_configs).wait.resources + assert_equal 1, resources_results.size + assert_equal 2, resources_results.first.type + assert_equal @topic_name, resources_results.first.name + + sleep(1) + + ret_config = admin.describe_configs(resources_with_configs).wait.resources.first.configs.find do |config| + config.name == "delete.retention.ms" + end + + assert_equal "86400000", ret_config.value + end + end + + context "when altering one topic with one valid config via append" do + it "appends to the config" do + target_policy = "compact" + resources_with_configs = [ + { + resource_type: 2, + resource_name: @topic_name, + configs: [ + { + name: "cleanup.policy", + value: target_policy, + op_type: 2 + } + ] + } + ] + resources_results = admin.incremental_alter_configs(resources_with_configs).wait.resources + assert_equal 1, resources_results.size + assert_equal 2, resources_results.first.type + assert_equal @topic_name, resources_results.first.name + + sleep(1) + + ret_config = admin.describe_configs(resources_with_configs).wait.resources.first.configs.find do |config| + config.name == "cleanup.policy" + end + + assert_equal "delete,#{target_policy}", ret_config.value + end + end + + context "when altering one topic with one valid config via subtrack" do + it "subtracts from the config" do + target_policy = "delete" + resources_with_configs = [ + { + resource_type: 2, + resource_name: @topic_name, + configs: [ + { + name: "cleanup.policy", + value: target_policy, + op_type: 3 + } + ] + } + ] + resources_results = admin.incremental_alter_configs(resources_with_configs).wait.resources + assert_equal 1, resources_results.size + assert_equal 2, resources_results.first.type + assert_equal @topic_name, resources_results.first.name + + sleep(1) + + ret_config = admin.describe_configs(resources_with_configs).wait.resources.first.configs.find do |config| + config.name == "cleanup.policy" + end + + assert_equal "", ret_config.value + end + end + + context "when altering one topic with invalid 
config" do + it "expect to raise error" do + target_retention = "-10" + resources_with_configs = [ + { + resource_type: 2, + resource_name: @topic_name, + configs: [ + { + name: "delete.retention.ms", + value: target_retention, + op_type: 0 + } + ] + } + ] + e = assert_raises(Rdkafka::RdkafkaError) do + admin.incremental_alter_configs(resources_with_configs).wait.resources + end + assert_match(/invalid_config/, e.message) + end + end + end + + describe "#list_offsets" do + context "when querying offsets for an existing topic with messages" do + before do + @lo_topic = TestTopics.create + # Produce a message to ensure partition leaders are fully established + lo_producer = rdkafka_config.producer + lo_producer.produce(topic: @lo_topic, payload: "warmup", partition: 0).wait + lo_producer.close + end + + it "returns earliest offsets" do + report = admin.list_offsets( + { @lo_topic => [{ partition: 0, offset: :earliest }] } + ).wait(max_wait_timeout_ms: 15_000) + + assert_kind_of Rdkafka::Admin::ListOffsetsReport, report + assert_operator report.offsets.length, :>=, 1 + + first = report.offsets.first + assert_equal @lo_topic, first[:topic] + assert_equal 0, first[:partition] + assert_operator first[:offset], :>=, 0 + end + + it "returns latest offsets" do + report = admin.list_offsets( + { @lo_topic => [{ partition: 0, offset: :latest }] } + ).wait(max_wait_timeout_ms: 15_000) + + assert_operator report.offsets.length, :>=, 1 + + first = report.offsets.first + assert_equal @lo_topic, first[:topic] + assert_equal 0, first[:partition] + assert_operator first[:offset], :>=, 0 + end + + it "returns offsets for multiple partitions at once" do + report = admin.list_offsets( + { @lo_topic => [ + { partition: 0, offset: :earliest }, + { partition: 1, offset: :latest } + ] } + ).wait(max_wait_timeout_ms: 15_000) + + assert_equal 2, report.offsets.length + assert_equal [0, 1], report.offsets.map { |o| o[:partition] }.sort + end + + it "returns offsets with read_committed isolation level" do + report = admin.list_offsets( + { @lo_topic => [{ partition: 0, offset: :latest }] }, + isolation_level: Rdkafka::Bindings::RD_KAFKA_ISOLATION_LEVEL_READ_COMMITTED + ).wait(max_wait_timeout_ms: 15_000) + + assert_equal 1, report.offsets.length + end + end + + context "when querying offsets by timestamp" do + it "returns offsets for a given timestamp" do + lo_topic = TestTopics.create + # Use a timestamp of 0 (epoch) to get earliest messages. + # Retry on transient broker errors (not_leader_for_partition) that can + # occur when partition leadership hasn't fully settled after topic creation. 
+ report = nil + 3.times do + report = admin.list_offsets( + { lo_topic => [{ partition: 0, offset: 0 }] } + ).wait(max_wait_timeout_ms: 15_000) + break + rescue Rdkafka::RdkafkaError => e + raise unless e.message.include?("not_leader_for_partition") + + sleep(1) + end + + assert_equal 1, report.offsets.length + first = report.offsets.first + assert_equal lo_topic, first[:topic] + assert_equal 0, first[:partition] + end + end + + context "when admin is closed" do + it "raises ClosedAdminError" do + admin.close + assert_raises(Rdkafka::ClosedAdminError) do + admin.list_offsets({ "topic" => [{ partition: 0, offset: :earliest }] }) + end + end + end + + context "edge case" do + context "where we are unable to get the background queue" do + before do + Rdkafka::Bindings.stubs(:rd_kafka_queue_get_background).returns(FFI::Pointer::NULL) + end + + it "raises an exception" do + e = assert_raises(Rdkafka::Config::ConfigError) do + admin.list_offsets({ "topic" => [{ partition: 0, offset: :earliest }] }) + end + assert_match(/rd_kafka_queue_get_background was NULL/, e.message) + end + end + + context "where rd_kafka_ListOffsets raises an exception" do + before do + Rdkafka::Bindings.stubs(:rd_kafka_ListOffsets).raises(RuntimeError.new("oops")) + end + + it "raises an exception" do + e = assert_raises(RuntimeError) do + admin.list_offsets({ "topic" => [{ partition: 0, offset: :earliest }] }) + end + assert_match(/oops/, e.message) + end + end + end + + context "with invalid offset specification" do + it "raises ArgumentError for unknown symbol" do + e = assert_raises(ArgumentError) do + admin.list_offsets({ "topic" => [{ partition: 0, offset: :unknown }] }) + end + assert_match(/Unknown offset specification/, e.message) + end + end + end + + describe "#delete_topic" do + describe "called with invalid input" do + describe "with an invalid topic name" do + it "raises an exception" do + topic_name = "[!@#]" + delete_topic_handle = admin.delete_topic(topic_name) + ex = assert_raises(Rdkafka::RdkafkaError) do + delete_topic_handle.wait(max_wait_timeout_ms: 15_000) + end + assert_kind_of Rdkafka::RdkafkaError, ex + assert_match(/Broker: Unknown topic or partition \(unknown_topic_or_part\)/, ex.message) + assert_match(/Broker: Unknown topic or partition/, ex.broker_message) + end + end + + describe "with the name of a topic that does not exist" do + it "raises an exception" do + delete_topic_handle = admin.delete_topic(@topic_name) + ex = assert_raises(Rdkafka::RdkafkaError) do + delete_topic_handle.wait(max_wait_timeout_ms: 15_000) + end + assert_kind_of Rdkafka::RdkafkaError, ex + assert_match(/Broker: Unknown topic or partition \(unknown_topic_or_part\)/, ex.message) + assert_match(/Broker: Unknown topic or partition/, ex.broker_message) + end + end + end + + context "edge case" do + context "where we are unable to get the background queue" do + before do + Rdkafka::Bindings.stubs(:rd_kafka_queue_get_background).returns(FFI::Pointer::NULL) + end + + it "raises an exception" do + e = assert_raises(Rdkafka::Config::ConfigError) do + admin.delete_topic(@topic_name) + end + assert_match(/rd_kafka_queue_get_background was NULL/, e.message) + end + end + + context "where rd_kafka_DeleteTopics raises an exception" do + before do + Rdkafka::Bindings.stubs(:rd_kafka_DeleteTopics).raises(RuntimeError.new("oops")) + end + + it "raises an exception" do + e = assert_raises(RuntimeError) do + admin.delete_topic(@topic_name) + end + assert_match(/oops/, e.message) + end + end + end + + it "deletes a topic that was newly 
created" do + create_topic_handle = admin.create_topic(@topic_name, @topic_partition_count, @topic_replication_factor) + create_topic_report = create_topic_handle.wait(max_wait_timeout_ms: 15_000) + assert_nil create_topic_report.error_string + assert_equal @topic_name, create_topic_report.result_name + + # Retry topic deletion a few times. On CI Kafka seems to not + # always be ready for it immediately + delete_topic_report = nil + 10.times do |i| + delete_topic_handle = admin.delete_topic(@topic_name) + delete_topic_report = delete_topic_handle.wait(max_wait_timeout_ms: 15_000) + break + rescue Rdkafka::RdkafkaError => ex + if i > 3 + raise ex + end + end + + assert_nil delete_topic_report.error_string + assert_equal @topic_name, delete_topic_report.result_name + end + end + + describe "#ACL tests for topic resource" do + before do + @non_existing_resource_name = "non-existing-topic" + # create topic for testing acl + create_topic_handle = admin.create_topic(@resource_name, @topic_partition_count, @topic_replication_factor) + create_topic_handle.wait(max_wait_timeout_ms: 15_000) + end + + after do + # delete acl + delete_acl_handle = admin.delete_acl(resource_type: @resource_type, resource_name: @resource_name, resource_pattern_type: @resource_pattern_type, principal: @principal, host: @host, operation: @operation, permission_type: @permission_type) + delete_acl_handle.wait(max_wait_timeout_ms: 15_000) + + # delete topic that was created for testing acl + delete_topic_handle = admin.delete_topic(@resource_name) + delete_topic_handle.wait(max_wait_timeout_ms: 15_000) + end + + describe "#create_acl" do + it "create acl for a topic that does not exist" do + # acl creation for resources that does not exist will still get created successfully. + create_acl_handle = admin.create_acl(resource_type: @resource_type, resource_name: @non_existing_resource_name, resource_pattern_type: @resource_pattern_type, principal: @principal, host: @host, operation: @operation, permission_type: @permission_type) + create_acl_report = create_acl_handle.wait(max_wait_timeout_ms: 15_000) + assert_equal 0, create_acl_report.rdkafka_response + assert_equal "", create_acl_report.rdkafka_response_string + + # delete the acl that was created for a non existing topic" + delete_acl_handle = admin.delete_acl(resource_type: @resource_type, resource_name: @non_existing_resource_name, resource_pattern_type: @resource_pattern_type, principal: @principal, host: @host, operation: @operation, permission_type: @permission_type) + delete_acl_report = delete_acl_handle.wait(max_wait_timeout_ms: 15_000) + assert_equal 0, delete_acl_handle[:response] + assert_equal 1, delete_acl_report.deleted_acls.size + end + + it "creates a acl for topic that was newly created" do + create_acl_handle = admin.create_acl(resource_type: @resource_type, resource_name: @resource_name, resource_pattern_type: @resource_pattern_type, principal: @principal, host: @host, operation: @operation, permission_type: @permission_type) + create_acl_report = create_acl_handle.wait(max_wait_timeout_ms: 15_000) + assert_equal 0, create_acl_report.rdkafka_response + assert_equal "", create_acl_report.rdkafka_response_string + end + end + + describe "#describe_acl" do + it "describe acl of a topic that does not exist" do + describe_acl_handle = admin.describe_acl(resource_type: @resource_type, resource_name: @non_existing_resource_name, resource_pattern_type: @resource_pattern_type, principal: @principal, host: @host, operation: @operation, permission_type: 
+ describe "#describe_acl" do + it "describes the acl of a topic that does not exist" do + describe_acl_handle = admin.describe_acl(resource_type: @resource_type, resource_name: @non_existing_resource_name, resource_pattern_type: @resource_pattern_type, principal: @principal, host: @host, operation: @operation, permission_type: @permission_type) + describe_acl_report = describe_acl_handle.wait(max_wait_timeout_ms: 15_000) + assert_equal 0, describe_acl_handle[:response] + assert_equal 0, describe_acl_report.acls.size + end + + it "creates acls and describes the newly created acls" do + # create_acl + create_acl_handle = admin.create_acl(resource_type: @resource_type, resource_name: "test_acl_topic_1", resource_pattern_type: @resource_pattern_type, principal: @principal, host: @host, operation: @operation, permission_type: @permission_type) + create_acl_report = create_acl_handle.wait(max_wait_timeout_ms: 15_000) + assert_equal 0, create_acl_report.rdkafka_response + assert_equal "", create_acl_report.rdkafka_response_string + + create_acl_handle = admin.create_acl(resource_type: @resource_type, resource_name: "test_acl_topic_2", resource_pattern_type: @resource_pattern_type, principal: @principal, host: @host, operation: @operation, permission_type: @permission_type) + create_acl_report = create_acl_handle.wait(max_wait_timeout_ms: 15_000) + assert_equal 0, create_acl_report.rdkafka_response + assert_equal "", create_acl_report.rdkafka_response_string + + # Since we create and immediately check, this is slow on loaded CIs, hence we wait + sleep(2) + + # describe_acl + describe_acl_handle = admin.describe_acl(resource_type: Rdkafka::Bindings::RD_KAFKA_RESOURCE_ANY, resource_name: nil, resource_pattern_type: Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_ANY, principal: nil, host: nil, operation: Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_ANY, permission_type: Rdkafka::Bindings::RD_KAFKA_ACL_PERMISSION_TYPE_ANY) + describe_acl_report = describe_acl_handle.wait(max_wait_timeout_ms: 15_000) + assert_equal 0, describe_acl_handle[:response] + assert_equal 2, describe_acl_report.acls.length + end + end + + describe "#delete_acl" do + it "deletes the acl of a topic that does not exist" do + delete_acl_handle = admin.delete_acl(resource_type: @resource_type, resource_name: @non_existing_resource_name, resource_pattern_type: @resource_pattern_type, principal: @principal, host: @host, operation: @operation, permission_type: @permission_type) + delete_acl_report = delete_acl_handle.wait(max_wait_timeout_ms: 15_000) + assert_equal 0, delete_acl_handle[:response] + assert_equal 0, delete_acl_report.deleted_acls.size + end + + it "creates an acl and deletes the newly created acl" do + # create_acl + create_acl_handle = admin.create_acl(resource_type: @resource_type, resource_name: "test_acl_topic_1", resource_pattern_type: @resource_pattern_type, principal: @principal, host: @host, operation: @operation, permission_type: @permission_type) + create_acl_report = create_acl_handle.wait(max_wait_timeout_ms: 15_000) + assert_equal 0, create_acl_report.rdkafka_response + assert_equal "", create_acl_report.rdkafka_response_string + + create_acl_handle = admin.create_acl(resource_type: @resource_type, resource_name: "test_acl_topic_2", resource_pattern_type: @resource_pattern_type, principal: @principal, host: @host, operation: @operation, permission_type: @permission_type) + create_acl_report = create_acl_handle.wait(max_wait_timeout_ms: 15_000) + assert_equal 0, create_acl_report.rdkafka_response + assert_equal "", create_acl_report.rdkafka_response_string + + # delete_acl - a nil resource_name deletes all acls matching the remaining filters. 
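+ # Both "test_acl_topic_1" and "test_acl_topic_2" created above match this
+ # filter, hence the expectation of 2 deleted acls below.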
+ delete_acl_handle = admin.delete_acl(resource_type: @resource_type, resource_name: nil, resource_pattern_type: @resource_pattern_type, principal: @principal, host: @host, operation: @operation, permission_type: @permission_type) + delete_acl_report = delete_acl_handle.wait(max_wait_timeout_ms: 15_000) + assert_equal 0, delete_acl_handle[:response] + assert_equal 2, delete_acl_report.deleted_acls.length + end + end + end + + describe "#ACL tests for transactional_id" do + before do + @transactional_id_resource_name = "test-transactional-id" + @non_existing_transactional_id = "non-existing-transactional-id" + @transactional_id_resource_type = Rdkafka::Bindings::RD_KAFKA_RESOURCE_TRANSACTIONAL_ID + @transactional_id_resource_pattern_type = Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_LITERAL + @transactional_id_principal = "User:test-user" + @transactional_id_host = "*" + @transactional_id_operation = Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_WRITE + @transactional_id_permission_type = Rdkafka::Bindings::RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW + end + + after do + # Clean up any ACLs that might have been created during tests + delete_acl_handle = admin.delete_acl( + resource_type: @transactional_id_resource_type, + resource_name: nil, + resource_pattern_type: @transactional_id_resource_pattern_type, + principal: @transactional_id_principal, + host: @transactional_id_host, + operation: @transactional_id_operation, + permission_type: @transactional_id_permission_type + ) + delete_acl_handle.wait(max_wait_timeout_ms: 15_000) + rescue + # Ignore cleanup errors + end + + describe "#create_acl" do + it "creates acl for a transactional_id" do + create_acl_handle = admin.create_acl( + resource_type: @transactional_id_resource_type, + resource_name: @transactional_id_resource_name, + resource_pattern_type: @transactional_id_resource_pattern_type, + principal: @transactional_id_principal, + host: @transactional_id_host, + operation: @transactional_id_operation, + permission_type: @transactional_id_permission_type + ) + create_acl_report = create_acl_handle.wait(max_wait_timeout_ms: 15_000) + assert_equal 0, create_acl_report.rdkafka_response + assert_equal "", create_acl_report.rdkafka_response_string + end + + it "creates acl for a non-existing transactional_id" do + # ACL creation for transactional_ids that don't exist will still get created successfully + create_acl_handle = admin.create_acl( + resource_type: @transactional_id_resource_type, + resource_name: @non_existing_transactional_id, + resource_pattern_type: @transactional_id_resource_pattern_type, + principal: @transactional_id_principal, + host: @transactional_id_host, + operation: @transactional_id_operation, + permission_type: @transactional_id_permission_type + ) + create_acl_report = create_acl_handle.wait(max_wait_timeout_ms: 15_000) + assert_equal 0, create_acl_report.rdkafka_response + assert_equal "", create_acl_report.rdkafka_response_string + + # Clean up the ACL that was created for the non-existing transactional_id + delete_acl_handle = admin.delete_acl( + resource_type: @transactional_id_resource_type, + resource_name: @non_existing_transactional_id, + resource_pattern_type: @transactional_id_resource_pattern_type, + principal: @transactional_id_principal, + host: @transactional_id_host, + operation: @transactional_id_operation, + permission_type: @transactional_id_permission_type + ) + delete_acl_report = delete_acl_handle.wait(max_wait_timeout_ms: 15_000) + assert_equal 0, delete_acl_handle[:response] + assert_equal 1, 
delete_acl_report.deleted_acls.size + end + end + + describe "#describe_acl" do + it "describes acl of a transactional_id that does not exist" do + describe_acl_handle = admin.describe_acl( + resource_type: @transactional_id_resource_type, + resource_name: @non_existing_transactional_id, + resource_pattern_type: @transactional_id_resource_pattern_type, + principal: @transactional_id_principal, + host: @transactional_id_host, + operation: @transactional_id_operation, + permission_type: @transactional_id_permission_type + ) + describe_acl_report = describe_acl_handle.wait(max_wait_timeout_ms: 15_000) + assert_equal 0, describe_acl_handle[:response] + assert_equal 0, describe_acl_report.acls.size + end + + it "creates acls and describes the newly created transactional_id acls" do + # Create first ACL + create_acl_handle = admin.create_acl( + resource_type: @transactional_id_resource_type, + resource_name: "test_transactional_id_1", + resource_pattern_type: @transactional_id_resource_pattern_type, + principal: @transactional_id_principal, + host: @transactional_id_host, + operation: @transactional_id_operation, + permission_type: @transactional_id_permission_type + ) + create_acl_report = create_acl_handle.wait(max_wait_timeout_ms: 15_000) + assert_equal 0, create_acl_report.rdkafka_response + assert_equal "", create_acl_report.rdkafka_response_string + + # Create second ACL + create_acl_handle = admin.create_acl( + resource_type: @transactional_id_resource_type, + resource_name: "test_transactional_id_2", + resource_pattern_type: @transactional_id_resource_pattern_type, + principal: @transactional_id_principal, + host: @transactional_id_host, + operation: @transactional_id_operation, + permission_type: @transactional_id_permission_type + ) + create_acl_report = create_acl_handle.wait(max_wait_timeout_ms: 15_000) + assert_equal 0, create_acl_report.rdkafka_response + assert_equal "", create_acl_report.rdkafka_response_string + + # Since we create and immediately check, this is slow on loaded CIs, hence we wait + sleep(2) + + # Describe ACLs - filter by transactional_id resource type + describe_acl_handle = admin.describe_acl( + resource_type: @transactional_id_resource_type, + resource_name: nil, + resource_pattern_type: Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_ANY, + principal: @transactional_id_principal, + host: @transactional_id_host, + operation: @transactional_id_operation, + permission_type: @transactional_id_permission_type + ) + describe_acl_report = describe_acl_handle.wait(max_wait_timeout_ms: 15_000) + assert_equal 0, describe_acl_handle[:response] + assert_equal 2, describe_acl_report.acls.length + end + end + + describe "#delete_acl" do + it "deletes acl of a transactional_id that does not exist" do + delete_acl_handle = admin.delete_acl( + resource_type: @transactional_id_resource_type, + resource_name: @non_existing_transactional_id, + resource_pattern_type: @transactional_id_resource_pattern_type, + principal: @transactional_id_principal, + host: @transactional_id_host, + operation: @transactional_id_operation, + permission_type: @transactional_id_permission_type + ) + delete_acl_report = delete_acl_handle.wait(max_wait_timeout_ms: 15_000) + assert_equal 0, delete_acl_handle[:response] + assert_equal 0, delete_acl_report.deleted_acls.size + end + + it "creates transactional_id acls and deletes the newly created acls" do + # Create first ACL + create_acl_handle = admin.create_acl( + resource_type: @transactional_id_resource_type, + resource_name: "test_transactional_id_1", 
+ resource_pattern_type: @transactional_id_resource_pattern_type, + principal: @transactional_id_principal, + host: @transactional_id_host, + operation: @transactional_id_operation, + permission_type: @transactional_id_permission_type + ) + create_acl_report = create_acl_handle.wait(max_wait_timeout_ms: 15_000) + assert_equal 0, create_acl_report.rdkafka_response + assert_equal "", create_acl_report.rdkafka_response_string + + # Create second ACL + create_acl_handle = admin.create_acl( + resource_type: @transactional_id_resource_type, + resource_name: "test_transactional_id_2", + resource_pattern_type: @transactional_id_resource_pattern_type, + principal: @transactional_id_principal, + host: @transactional_id_host, + operation: @transactional_id_operation, + permission_type: @transactional_id_permission_type + ) + create_acl_report = create_acl_handle.wait(max_wait_timeout_ms: 15_000) + assert_equal 0, create_acl_report.rdkafka_response + assert_equal "", create_acl_report.rdkafka_response_string + + # Delete ACLs - resource_name nil to delete all ACLs with any resource name and matching all other filters + delete_acl_handle = admin.delete_acl( + resource_type: @transactional_id_resource_type, + resource_name: nil, + resource_pattern_type: @transactional_id_resource_pattern_type, + principal: @transactional_id_principal, + host: @transactional_id_host, + operation: @transactional_id_operation, + permission_type: @transactional_id_permission_type + ) + delete_acl_report = delete_acl_handle.wait(max_wait_timeout_ms: 15_000) + assert_equal 0, delete_acl_handle[:response] + assert_equal 2, delete_acl_report.deleted_acls.length + end + end + end + + describe "Group tests" do + describe "#delete_group" do + describe "with an existing group" do + before do + @group_producer = rdkafka_producer_config.producer + @group_consumer = rdkafka_consumer_config("group.id": @group_name).consumer + + # Create a topic, post a message to it, consume it and commit offsets, this will create a group that we can then delete. + admin.create_topic(@topic_name, @topic_partition_count, @topic_replication_factor).wait(max_wait_timeout_ms: 15_000) + + @group_producer.produce(topic: @topic_name, payload: "test", key: "test").wait(max_wait_timeout_ms: 15_000) + + @group_consumer.subscribe(@topic_name) + wait_for_assignment(@group_consumer) + message = nil + + 10.times do + message ||= @group_consumer.poll(100) + end + + refute_nil message + + @group_consumer.commit + @group_consumer.close + end + + after do + @group_producer.close + @group_consumer.close + end + + it "deletes the group" do + delete_group_handle = admin.delete_group(@group_name) + report = delete_group_handle.wait(max_wait_timeout_ms: 15_000) + + assert_equal @group_name, report.result_name + end + end + + describe "called with invalid input" do + describe "with the name of a group that does not exist" do + it "raises an exception" do + delete_group_handle = admin.delete_group(@group_name) + + ex = assert_raises(Rdkafka::RdkafkaError) do + delete_group_handle.wait(max_wait_timeout_ms: 15_000) + end + assert_kind_of Rdkafka::RdkafkaError, ex + assert_match(/group_id_not_found|not_coordinator/, ex.message) + end + end + end + end + end + + describe "#create_partitions" do + def metadata_for(topic_name) + admin.metadata(topic_name).topics.first + rescue Rdkafka::RdkafkaError + # We have to wait because if we query too fast after topic creation request, it may not + # yet be available throwing an error. 
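+ # (librdkafka typically surfaces this as an unknown_topic_or_part error
+ # until the new topic's metadata has propagated across the cluster)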
+ # This occurs mostly on slow CIs + sleep(1) + admin.metadata(topic_name).topics.first + end + + context "when topic does not exist" do + it "expect to fail due to unknown topic or partition" do + e = assert_raises(Rdkafka::RdkafkaError) do + admin.create_partitions(@topic_name, 10).wait + end + assert_match(/unknown_topic_or_part/, e.message) + end + end + + context "when topic already has the desired number of partitions" do + before { admin.create_topic(@topic_name, 2, 1).wait } + + it "expect not to change number of partitions" do + e = assert_raises(Rdkafka::RdkafkaError) do + admin.create_partitions(@topic_name, 2).wait + end + assert_match(/invalid_partitions/, e.message) + assert_equal 2, metadata_for(@topic_name)[:partition_count] + end + end + + context "when topic has more than the requested number of partitions" do + before { admin.create_topic(@topic_name, 5, 1).wait } + + it "expect not to change number of partitions" do + e = assert_raises(Rdkafka::RdkafkaError) do + admin.create_partitions(@topic_name, 2).wait + end + assert_match(/invalid_partitions/, e.message) + # On slow CI the change may still be propagating, thus we wait a bit + sleep(1) + assert_equal 5, metadata_for(@topic_name)[:partition_count] + end + end + + context "when topic has fewer than the desired number of partitions" do + before do + admin.create_topic(@topic_name, 1, 1).wait + sleep(1) + end + + it "expect to change number of partitions" do + admin.create_partitions(@topic_name, 10).wait + sleep(1) + assert_equal 10, metadata_for(@topic_name)[:partition_count] + end + end + end + + describe "#oauthbearer_set_token" do + context "when sasl not configured" do + it "returns RD_KAFKA_RESP_ERR__STATE" do + response = admin.oauthbearer_set_token( + token: "foo", + lifetime_ms: Time.now.to_i * 1000 + 900 * 1000, + principal_name: "kafka-cluster" + ) + assert_equal Rdkafka::Bindings::RD_KAFKA_RESP_ERR__STATE, response + end + end + + context "when sasl configured" do + before do + config_sasl = rdkafka_config( + "security.protocol": "sasl_ssl", + "sasl.mechanisms": "OAUTHBEARER" + ) + @admin_sasl = config_sasl.admin + end + + after do + @admin_sasl.close + end + + context "without extensions" do + it "succeeds" do + response = @admin_sasl.oauthbearer_set_token( + token: "foo", + lifetime_ms: Time.now.to_i * 1000 + 900 * 1000, + principal_name: "kafka-cluster" + ) + assert_equal 0, response + end + end + + context "with extensions" do + it "succeeds" do + response = @admin_sasl.oauthbearer_set_token( + token: "foo", + lifetime_ms: Time.now.to_i * 1000 + 900 * 1000, + principal_name: "kafka-cluster", + extensions: { + "foo" => "bar" + } + ) + assert_equal 0, response + end + end + end + end + + describe "#events_poll_nb_each" do + it "does not raise when queue is empty" do + admin.events_poll_nb_each { |_| } + end + + it "yields the count after each poll" do + counts = [] + # Stub to return events, then zero + call_count = 0 + Rdkafka::Bindings.stubs(:rd_kafka_poll_nb).with do + call_count += 1 + true + end.returns(1, 1, 0) + + admin.events_poll_nb_each { |count| counts << count } + + assert_equal [1, 1], counts + end + + it "stops when block returns :stop" do + iterations = 0 + # Stub to always return events + Rdkafka::Bindings.stubs(:rd_kafka_poll_nb).returns(1) + + admin.events_poll_nb_each do |_count| + iterations += 1 + :stop if iterations >= 3 + end + + assert_equal 3, iterations + end + + context "when admin is closed" do + before { admin.close } + + it "raises ClosedAdminError" do + e = assert_raises(Rdkafka::ClosedAdminError) do + 
admin.events_poll_nb_each { |_| } + end + assert_match(/events_poll_nb_each/, e.message) + end + end + end + + describe "file descriptor access for fiber scheduler integration" do + before do + @admin = @config.admin(run_polling_thread: false) + end + + it "enables IO events on admin queue" do + signal_r, signal_w = IO.pipe + admin.enable_queue_io_events(signal_w.fileno) + signal_r.close + signal_w.close + end + + it "enables IO events on background queue" do + signal_r, signal_w = IO.pipe + admin.enable_background_queue_io_events(signal_w.fileno) + signal_r.close + signal_w.close + end + + context "when admin is closed" do + before { admin.close } + + it "raises ClosedInnerError when enabling queue_io_events" do + signal_r, signal_w = IO.pipe + assert_raises(Rdkafka::ClosedInnerError) do + admin.enable_queue_io_events(signal_w.fileno) + end + signal_r.close + signal_w.close + end + + it "raises ClosedInnerError when enabling background_queue_io_events" do + signal_r, signal_w = IO.pipe + assert_raises(Rdkafka::ClosedInnerError) do + admin.enable_background_queue_io_events(signal_w.fileno) + end + signal_r.close + signal_w.close + end + end + end + + unless RUBY_PLATFORM == "java" + context "when operating from a fork" do + # @see https://github.com/ffi/ffi/issues/1114 + it "expect to be able to create topics and run other admin operations without hanging" do + # If the FFI issue is not mitigated, this will hang forever + pid = fork do + admin + .create_topic(@topic_name, @topic_partition_count, @topic_replication_factor) + .wait + end + + Process.wait(pid) + end + end + end +end diff --git a/spec/lib/rdkafka/bindings_spec.rb b/test/lib/rdkafka/bindings_test.rb similarity index 53% rename from spec/lib/rdkafka/bindings_spec.rb rename to test/lib/rdkafka/bindings_test.rb index b89fcb92..7d6c6a61 100644 --- a/spec/lib/rdkafka/bindings_spec.rb +++ b/test/lib/rdkafka/bindings_test.rb @@ -1,27 +1,22 @@ # frozen_string_literal: true +require_relative "../../test_helper" require "zlib" -RSpec.describe Rdkafka::Bindings do +describe Rdkafka::Bindings do it "loads librdkafka" do - expect(described_class.ffi_libraries.map(&:name).first).to include "librdkafka" + assert_includes described_class.ffi_libraries.map(&:name).first, "librdkafka" end describe "glibc error handling" do it "provides a helpful error message for glibc compatibility issues" do - # This test simulates what would happen if the library loading fails with a glibc error - # We can't actually test the real scenario without breaking the test suite, - # but we can verify the error message format would be correct - - # Create a mock LoadError with a glibc message glibc_error = LoadError.new("Could not open library 'librdkafka.so': /lib/x86_64-linux-gnu/libc.so.6: version `GLIBC_2.38' not found") - # Extract what the error handler would do error_message = glibc_error.message - expect(error_message).to match(/GLIBC_[\d.]+['"` ]?\s*not found/i) + assert_match(/GLIBC_[\d.]+['"` ]?\s*not found/i, error_message) glibc_version = error_message[/GLIBC_([\d.]+)/, 1] - expect(glibc_version).to eq("2.38") + assert_equal "2.38", glibc_version end it "detects various glibc error message formats" do @@ -32,113 +27,112 @@ "version `GLIBC_2.39` not found" ] - expect(test_cases).to all(match(/GLIBC_[\d.]+['"` ]?\s*not found/i)) + test_cases.each do |tc| + assert_match(/GLIBC_[\d.]+['"` ]?\s*not found/i, tc) + end end it "handles edge cases where version extraction fails gracefully" do - # Simulate an edge case where the pattern matches but capture group 
might fail error_message = "GLIBC_ not found" - # The pattern might match but version extraction should fallback to 'unknown' if /GLIBC_[\d.]+['"` ]?\s*not found/i.match?(error_message) - # This won't execute because the pattern requires [\d.]+ glibc_version = error_message[/GLIBC_([\d.]+)/, 1] || "unknown" - expect(glibc_version).to eq("unknown") + assert_equal "unknown", glibc_version end - # More realistic: ensure fallback works if regex changes in the future glibc_version = error_message[/GLIBC_([\d.]+)/, 1] || "unknown" - expect(glibc_version).to eq("unknown") + assert_equal "unknown", glibc_version end end describe ".lib_extension" do it "knows the lib extension for darwin" do - stub_const("RbConfig::CONFIG", "host_os" => "darwin") - expect(described_class.lib_extension).to eq "dylib" + with_stubbed_const(RbConfig, :CONFIG, "host_os" => "darwin") do + assert_equal "dylib", described_class.lib_extension + end end it "knows the lib extension for linux" do - stub_const("RbConfig::CONFIG", "host_os" => "linux") - expect(described_class.lib_extension).to eq "so" + with_stubbed_const(RbConfig, :CONFIG, "host_os" => "linux") do + assert_equal "so", described_class.lib_extension + end end end it "successfullies call librdkafka" do - expect { - described_class.rd_kafka_conf_new - }.not_to raise_error + described_class.rd_kafka_conf_new end describe "non-blocking poll bindings" do it "has rd_kafka_poll_nb attached" do - expect(described_class).to respond_to(:rd_kafka_poll_nb) + assert_respond_to described_class, :rd_kafka_poll_nb end it "has rd_kafka_consumer_poll_nb attached" do - expect(described_class).to respond_to(:rd_kafka_consumer_poll_nb) + assert_respond_to described_class, :rd_kafka_consumer_poll_nb end end describe "log callback" do - let(:log_queue) { Rdkafka::Config.log_queue } - - before do - allow(log_queue).to receive(:<<) - end - - it "logs fatal messages" do + it "logs fatal messages at level 0" do + log_queue = Rdkafka::Config.log_queue + log_queue.expects(:<<).with([Logger::FATAL, "rdkafka: log line"]) Rdkafka::Bindings::LogCallback.call(nil, 0, nil, "log line") - expect(log_queue).to have_received(:<<).with([Logger::FATAL, "rdkafka: log line"]) end - it "logs fatal messages" do + it "logs fatal messages at level 1" do + log_queue = Rdkafka::Config.log_queue + log_queue.expects(:<<).with([Logger::FATAL, "rdkafka: log line"]) Rdkafka::Bindings::LogCallback.call(nil, 1, nil, "log line") - expect(log_queue).to have_received(:<<).with([Logger::FATAL, "rdkafka: log line"]) end - it "logs fatal messages" do + it "logs fatal messages at level 2" do + log_queue = Rdkafka::Config.log_queue + log_queue.expects(:<<).with([Logger::FATAL, "rdkafka: log line"]) Rdkafka::Bindings::LogCallback.call(nil, 2, nil, "log line") - expect(log_queue).to have_received(:<<).with([Logger::FATAL, "rdkafka: log line"]) end it "logs error messages" do + log_queue = Rdkafka::Config.log_queue + log_queue.expects(:<<).with([Logger::ERROR, "rdkafka: log line"]) Rdkafka::Bindings::LogCallback.call(nil, 3, nil, "log line") - expect(log_queue).to have_received(:<<).with([Logger::ERROR, "rdkafka: log line"]) end it "logs warning messages" do + log_queue = Rdkafka::Config.log_queue + log_queue.expects(:<<).with([Logger::WARN, "rdkafka: log line"]) Rdkafka::Bindings::LogCallback.call(nil, 4, nil, "log line") - expect(log_queue).to have_received(:<<).with([Logger::WARN, "rdkafka: log line"]) end - it "logs info messages" do + it "logs info messages at level 5" do + log_queue = Rdkafka::Config.log_queue + 
log_queue.expects(:<<).with([Logger::INFO, "rdkafka: log line"]) Rdkafka::Bindings::LogCallback.call(nil, 5, nil, "log line") - expect(log_queue).to have_received(:<<).with([Logger::INFO, "rdkafka: log line"]) end - it "logs info messages" do + it "logs info messages at level 6" do + log_queue = Rdkafka::Config.log_queue + log_queue.expects(:<<).with([Logger::INFO, "rdkafka: log line"]) Rdkafka::Bindings::LogCallback.call(nil, 6, nil, "log line") - expect(log_queue).to have_received(:<<).with([Logger::INFO, "rdkafka: log line"]) end it "logs debug messages" do + log_queue = Rdkafka::Config.log_queue + log_queue.expects(:<<).with([Logger::DEBUG, "rdkafka: log line"]) Rdkafka::Bindings::LogCallback.call(nil, 7, nil, "log line") - expect(log_queue).to have_received(:<<).with([Logger::DEBUG, "rdkafka: log line"]) end it "logs unknown messages" do + log_queue = Rdkafka::Config.log_queue + log_queue.expects(:<<).with([Logger::UNKNOWN, "rdkafka: log line"]) Rdkafka::Bindings::LogCallback.call(nil, 100, nil, "log line") - expect(log_queue).to have_received(:<<).with([Logger::UNKNOWN, "rdkafka: log line"]) end end describe "stats callback" do context "without a stats callback" do it "does nothing" do - expect { - Rdkafka::Bindings::StatsCallback.call(nil, "{}", 2, nil) - }.not_to raise_error + Rdkafka::Bindings::StatsCallback.call(nil, "{}", 2, nil) end end @@ -151,7 +145,7 @@ it "calls the stats callback with a stats hash" do Rdkafka::Bindings::StatsCallback.call(nil, "{\"received\":1}", 13, nil) - expect($received_stats).to eq({ "received" => 1 }) + assert_equal({ "received" => 1 }, $received_stats) end end end @@ -159,9 +153,7 @@ describe "error callback" do context "without an error callback" do it "does nothing" do - expect { - Rdkafka::Bindings::ErrorCallback.call(nil, 1, "error", nil) - }.not_to raise_error + Rdkafka::Bindings::ErrorCallback.call(nil, 1, "error", nil) end end @@ -174,13 +166,13 @@ it "calls the error callback with an Rdkafka::Error" do Rdkafka::Bindings::ErrorCallback.call(nil, 8, "Broker not available", nil) - expect($received_error.code).to eq(:broker_not_available) - expect($received_error.broker_message).to eq("Broker not available") + assert_equal :broker_not_available, $received_error.code + assert_equal "Broker not available", $received_error.broker_message end it "sets instance_name to nil when client_ptr is null" do Rdkafka::Bindings::ErrorCallback.call(nil, 8, "Broker not available", nil) - expect($received_error.instance_name).to be_nil + assert_nil $received_error.instance_name end end @@ -191,21 +183,19 @@ received_errors << error end - # Create a producer pointing to a non-existent broker to trigger error callbacks config = Rdkafka::Config.new( "bootstrap.servers": "127.0.0.1:19999", "statistics.interval.ms": 0 ) producer = config.producer - # Wait for librdkafka to attempt connection and fire error callbacks sleep(2) producer.close errors_with_name = received_errors.select { |e| e.instance_name } - expect(errors_with_name).not_to be_empty - expect(errors_with_name.first.instance_name).to include("rdkafka#producer-") + refute_empty errors_with_name + assert_includes errors_with_name.first.instance_name, "rdkafka#producer-" end end end @@ -213,20 +203,20 @@ describe "oauthbearer set token" do context "with args" do before do - DEFAULT_TOKEN_EXPIRY_SECONDS = 900 - $token_value = "token" - $md_lifetime_ms = Time.now.to_i * 1000 + DEFAULT_TOKEN_EXPIRY_SECONDS * 1000 - $md_principal_name = "kafka-cluster" - $extensions = nil - $extension_size = 0 - $error_buffer = 
FFI::MemoryPointer.from_string(" " * 256) + @default_token_expiry_seconds = 900 + @token_value = "token" + @md_lifetime_ms = Time.now.to_i * 1000 + @default_token_expiry_seconds * 1000 + @md_principal_name = "kafka-cluster" + @extensions = nil + @extension_size = 0 + @error_buffer = FFI::MemoryPointer.from_string(" " * 256) end it "sets token or capture failure" do RdKafkaTestConsumer.with do |consumer_ptr| - response = described_class.rd_kafka_oauthbearer_set_token(consumer_ptr, $token_value, $md_lifetime_ms, $md_principal_name, $extensions, $extension_size, $error_buffer, 256) - expect(response).to eq(Rdkafka::Bindings::RD_KAFKA_RESP_ERR__STATE) - expect($error_buffer.read_string).to eq("SASL/OAUTHBEARER is not the configured authentication mechanism") + response = described_class.rd_kafka_oauthbearer_set_token(consumer_ptr, @token_value, @md_lifetime_ms, @md_principal_name, @extensions, @extension_size, @error_buffer, 256) + assert_equal Rdkafka::Bindings::RD_KAFKA_RESP_ERR__STATE, response + assert_equal "SASL/OAUTHBEARER is not the configured authentication mechanism", @error_buffer.read_string end end end @@ -235,20 +225,18 @@ describe "oauthbearer set token failure" do context "without args" do it "fails" do - expect { + assert_raises(ArgumentError) do described_class.rd_kafka_oauthbearer_set_token_failure - }.to raise_error(ArgumentError) + end end end context "with args" do it "succeeds" do - expect { - errstr = "error" - RdKafkaTestConsumer.with do |consumer_ptr| - described_class.rd_kafka_oauthbearer_set_token_failure(consumer_ptr, errstr) - end - }.not_to raise_error + errstr = "error" + RdKafkaTestConsumer.with do |consumer_ptr| + described_class.rd_kafka_oauthbearer_set_token_failure(consumer_ptr, errstr) + end end end end @@ -256,9 +244,7 @@ describe "oauthbearer callback" do context "without an oauthbearer callback" do it "does nothing" do - expect { - Rdkafka::Bindings::OAuthbearerTokenRefreshCallback.call(nil, "", nil) - }.not_to raise_error + Rdkafka::Bindings::OAuthbearerTokenRefreshCallback.call(nil, "", nil) end end @@ -273,8 +259,8 @@ it "calls the oauth bearer callback and receive config and client name" do RdKafkaTestConsumer.with do |consumer_ptr| Rdkafka::Bindings::OAuthbearerTokenRefreshCallback.call(consumer_ptr, "{}", nil) - expect($received_config).to eq("{}") - expect($received_client_name).to match(/consumer/) + assert_equal "{}", $received_config + assert_match(/consumer/, $received_client_name) end end end diff --git a/test/lib/rdkafka/callbacks_test.rb b/test/lib/rdkafka/callbacks_test.rb new file mode 100644 index 00000000..271ac3f4 --- /dev/null +++ b/test/lib/rdkafka/callbacks_test.rb @@ -0,0 +1,3 @@ +# frozen_string_literal: true + +require_relative "../../test_helper" diff --git a/spec/lib/rdkafka/config_spec.rb b/test/lib/rdkafka/config_test.rb similarity index 60% rename from spec/lib/rdkafka/config_spec.rb rename to test/lib/rdkafka/config_test.rb index e30e4976..c5c26fb0 100644 --- a/spec/lib/rdkafka/config_spec.rb +++ b/test/lib/rdkafka/config_test.rb @@ -1,22 +1,24 @@ # frozen_string_literal: true -RSpec.describe Rdkafka::Config do +require_relative "../../test_helper" + +describe Rdkafka::Config do context "logger" do it "has a default logger" do - expect(described_class.logger).to be_a Logger + assert_kind_of Logger, described_class.logger end it "sets the logger" do logger = Logger.new($stdout) - expect(described_class.logger).not_to eq logger + refute_equal logger, described_class.logger described_class.logger = logger - 
expect(described_class.logger).to eq logger + assert_equal logger, described_class.logger end it "does not accept a nil logger" do - expect { + assert_raises(Rdkafka::Config::NoLoggerError) do described_class.logger = nil - }.to raise_error(Rdkafka::Config::NoLoggerError) + end end it "supports logging queue" do @@ -30,7 +32,7 @@ sleep 0.05 end - expect(log.string).to include "FATAL -- : I love testing" + assert_includes log.string, "FATAL -- : I love testing" end unless RUBY_PLATFORM == "java" @@ -50,7 +52,7 @@ writer.close Process.wait(pid) output = reader.read - expect(output.split("\n").size).to be >= 20 + assert_operator output.split("\n").size, :>=, 20 end end end @@ -58,11 +60,9 @@ context "statistics callback" do context "with a proc/lambda" do it "sets the callback" do - expect { - described_class.statistics_callback = lambda do |stats| - end - }.not_to raise_error - expect(described_class.statistics_callback).to respond_to :call + described_class.statistics_callback = lambda do |stats| + end + assert_respond_to described_class.statistics_callback, :call end end @@ -72,28 +72,24 @@ def call(stats) end end - expect { - described_class.statistics_callback = callback.new - }.not_to raise_error - expect(described_class.statistics_callback).to respond_to :call + described_class.statistics_callback = callback.new + assert_respond_to described_class.statistics_callback, :call end end it "does not accept a callback that's not callable" do - expect { + assert_raises(TypeError) do described_class.statistics_callback = "a string" - }.to raise_error(TypeError) + end end end context "error callback" do context "with a proc/lambda" do it "sets the callback" do - expect { - described_class.error_callback = lambda do |error| - end - }.not_to raise_error - expect(described_class.error_callback).to respond_to :call + described_class.error_callback = lambda do |error| + end + assert_respond_to described_class.error_callback, :call end end @@ -103,34 +99,28 @@ def call(stats) def call(stats) end end - expect { - described_class.error_callback = callback.new - }.not_to raise_error - expect(described_class.error_callback).to respond_to :call + described_class.error_callback = callback.new + assert_respond_to described_class.error_callback, :call end end it "does not accept a callback that's not callable" do - expect { + assert_raises(TypeError) do described_class.error_callback = "a string" - }.to raise_error(TypeError) + end end it "accepts nil to clear the callback" do - expect { - described_class.error_callback = nil - }.not_to raise_error + described_class.error_callback = nil end end context "oauthbearer calllback" do context "with a proc/lambda" do it "sets the callback" do - expect { - described_class.oauthbearer_token_refresh_callback = lambda do |config, client_name| - end - }.not_to raise_error - expect(described_class.oauthbearer_token_refresh_callback).to respond_to :call + described_class.oauthbearer_token_refresh_callback = lambda do |config, client_name| + end + assert_respond_to described_class.oauthbearer_token_refresh_callback, :call end end @@ -141,17 +131,15 @@ def call(config, client_name) end end - expect { - described_class.oauthbearer_token_refresh_callback = callback.new - }.not_to raise_error - expect(described_class.oauthbearer_token_refresh_callback).to respond_to :call + described_class.oauthbearer_token_refresh_callback = callback.new + assert_respond_to described_class.oauthbearer_token_refresh_callback, :call end end it "does not accept a callback that's not callable" do 
- expect { + assert_raises(TypeError) do described_class.oauthbearer_token_refresh_callback = "not a callback" - }.to raise_error(TypeError) + end end end @@ -159,17 +147,17 @@ def call(config, client_name) it "stores configuration" do config = described_class.new config[:key] = "value" - expect(config[:key]).to eq "value" + assert_equal "value", config[:key] end it "uses default configuration" do config = described_class.new - expect(config[:"api.version.request"]).to be_nil + assert_nil config[:"api.version.request"] end it "creates a consumer with valid config" do consumer = rdkafka_consumer_config.consumer - expect(consumer).to be_a Rdkafka::Consumer + assert_kind_of Rdkafka::Consumer, consumer consumer.close end @@ -177,65 +165,71 @@ def call(config, client_name) config = rdkafka_consumer_config config.consumer_poll_set = false consumer = config.consumer - expect(consumer).to be_a Rdkafka::Consumer + assert_kind_of Rdkafka::Consumer, consumer consumer.close end it "raises an error when creating a consumer with invalid config" do config = described_class.new("invalid.key" => "value") - expect { + e = assert_raises(Rdkafka::Config::ConfigError) do config.consumer - }.to raise_error(Rdkafka::Config::ConfigError, "No such configuration property: \"invalid.key\"") + end + assert_equal "No such configuration property: \"invalid.key\"", e.message end it "raises an error when creating a consumer with a nil key in the config" do config = described_class.new(nil => "value") - expect { + e = assert_raises(Rdkafka::Config::ConfigError) do config.consumer - }.to raise_error(Rdkafka::Config::ConfigError, "No such configuration property: \"\"") + end + assert_equal "No such configuration property: \"\"", e.message end it "treats a nil value as blank" do config = described_class.new("security.protocol" => nil) - expect { + e = assert_raises(Rdkafka::Config::ConfigError) do config.consumer config.producer - }.to raise_error(Rdkafka::Config::ConfigError, "Configuration property \"security.protocol\" cannot be set to empty value") + end + assert_equal "Configuration property \"security.protocol\" cannot be set to empty value", e.message end it "creates a producer with valid config" do producer = rdkafka_consumer_config.producer - expect(producer).to be_a Rdkafka::Producer + assert_kind_of Rdkafka::Producer, producer producer.close end it "raises an error when creating a producer with invalid config" do config = described_class.new("invalid.key" => "value") - expect { + e = assert_raises(Rdkafka::Config::ConfigError) do config.producer - }.to raise_error(Rdkafka::Config::ConfigError, "No such configuration property: \"invalid.key\"") + end + assert_equal "No such configuration property: \"invalid.key\"", e.message end it "allows string partitioner key" do - expect(Rdkafka::Producer).to receive(:new).with(kind_of(Rdkafka::NativeKafka), "murmur2").and_call_original config = described_class.new("partitioner" => "murmur2") - config.producer.close + producer = config.producer + assert_kind_of Rdkafka::Producer, producer + producer.close end it "allows symbol partitioner key" do - expect(Rdkafka::Producer).to receive(:new).with(kind_of(Rdkafka::NativeKafka), "murmur2").and_call_original config = described_class.new(partitioner: "murmur2") - config.producer.close + producer = config.producer + assert_kind_of Rdkafka::Producer, producer + producer.close end it "allows configuring zstd compression" do config = described_class.new("compression.codec" => "zstd") begin producer = config.producer - 
expect(producer).to be_a Rdkafka::Producer + assert_kind_of Rdkafka::Producer, producer producer.close rescue Rdkafka::Config::ConfigError => ex - pending "Zstd compression not supported on this machine" + skip "Zstd compression not supported on this machine" raise ex end end @@ -245,9 +239,10 @@ def call(config, client_name) "security.protocol" => "SSL", "ssl.ca.location" => "/nonsense" ) - expect { + e = assert_raises(Rdkafka::Config::ClientCreationError) do config.consumer - }.to raise_error(Rdkafka::Config::ClientCreationError, /ssl.ca.location failed(.*)/) + end + assert_match(/ssl.ca.location failed(.*)/, e.message) end it "raises an error when client creation fails for a producer" do @@ -255,9 +250,10 @@ def call(config, client_name) "security.protocol" => "SSL", "ssl.ca.location" => "/nonsense" ) - expect { + e = assert_raises(Rdkafka::Config::ClientCreationError) do config.producer - }.to raise_error(Rdkafka::Config::ClientCreationError, /ssl.ca.location failed(.*)/) + end + assert_match(/ssl.ca.location failed(.*)/, e.message) end end end diff --git a/test/lib/rdkafka/consumer/headers_test.rb b/test/lib/rdkafka/consumer/headers_test.rb new file mode 100644 index 00000000..8388b4a8 --- /dev/null +++ b/test/lib/rdkafka/consumer/headers_test.rb @@ -0,0 +1,91 @@ +# frozen_string_literal: true + +require_relative "../../../test_helper" + +describe Rdkafka::Consumer::Headers do + before do + @expected_headers = { + "version" => ["2.1.3", "2.1.4"], + "type" => "String" + } + end + + describe ".from_native" do + # Store the original FFI methods at class load time (before any test can remove them) + ORIG_RD_KAFKA_MESSAGE_HEADERS = Rdkafka::Bindings.method(:rd_kafka_message_headers) + ORIG_RD_KAFKA_HEADER_GET_ALL = Rdkafka::Bindings.method(:rd_kafka_header_get_all) + + before do + @native_message = FFI::MemoryPointer.new(:int) + @headers_ptr = FFI::MemoryPointer.new(:int) + + header_data = [ + { name: "version", value: "2.1.3" }, + { name: "version", value: "2.1.4" }, + { name: "type", value: "String" } + ] + + headers_ptr = @headers_ptr + bindings_meta = class << Rdkafka::Bindings; self; end + + # Remove existing singleton methods to avoid redefinition warnings + bindings_meta.send(:remove_method, :rd_kafka_message_headers) if bindings_meta.method_defined?(:rd_kafka_message_headers, false) + bindings_meta.send(:define_method, :rd_kafka_message_headers) do |msg, ptrptr| + ptrptr.write_pointer(headers_ptr) if ptrptr.respond_to?(:write_pointer) + Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR + end + + bindings_meta.send(:remove_method, :rd_kafka_header_get_all) if bindings_meta.method_defined?(:rd_kafka_header_get_all, false) + bindings_meta.send(:define_method, :rd_kafka_header_get_all) do |ptr, idx, name_ptrptr, value_ptrptr, size_ptr| + if idx < header_data.size + hdr = header_data[idx] + + name_buf = FFI::MemoryPointer.from_string(hdr[:name]) + name_ptrptr.write_pointer(name_buf) + + value_buf = FFI::MemoryPointer.from_string(hdr[:value]) + value_ptrptr.write_pointer(value_buf) + + size_ptr[:value] = hdr[:value].bytesize + + Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR + else + Rdkafka::Bindings::RD_KAFKA_RESP_ERR__NOENT + end + end + end + + after do + # Restore the original FFI-attached methods + bindings_meta = class << Rdkafka::Bindings; self; end + bindings_meta.send(:remove_method, :rd_kafka_message_headers) if bindings_meta.method_defined?(:rd_kafka_message_headers, false) + bindings_meta.send(:define_method, :rd_kafka_message_headers, ORIG_RD_KAFKA_MESSAGE_HEADERS) + 
bindings_meta.send(:remove_method, :rd_kafka_header_get_all) if bindings_meta.method_defined?(:rd_kafka_header_get_all, false) + bindings_meta.send(:define_method, :rd_kafka_header_get_all, ORIG_RD_KAFKA_HEADER_GET_ALL) + end + + def headers + @headers ||= described_class.from_native(@native_message) + end + + it "returns the expected headers" do + assert_equal @expected_headers, headers + end + + it "returns frozen headers" do + assert headers.frozen? + end + + it "returns array for duplicate headers" do + assert_equal ["2.1.3", "2.1.4"], headers["version"] + end + + it "returns string for single headers" do + assert_equal "String", headers["type"] + end + + it "does not support symbols mappings" do + assert_equal false, headers.key?(:version) + end + end +end diff --git a/test/lib/rdkafka/consumer/message_test.rb b/test/lib/rdkafka/consumer/message_test.rb new file mode 100644 index 00000000..0050665c --- /dev/null +++ b/test/lib/rdkafka/consumer/message_test.rb @@ -0,0 +1,128 @@ +# frozen_string_literal: true + +require_relative "../../../test_helper" + +describe Rdkafka::Consumer::Message do + before do + @native_client = new_native_client + @native_topic = new_native_topic(native_client: @native_client) + @payload = nil + @key = nil + end + + after do + Rdkafka::Bindings.rd_kafka_destroy(@native_client) + end + + def build_native_message(payload: @payload, key: @key) + Rdkafka::Bindings::Message.new.tap do |msg| + msg[:rkt] = @native_topic + msg[:partition] = 3 + msg[:offset] = 100 + if payload + ptr = FFI::MemoryPointer.new(:char, payload.bytesize) + ptr.put_bytes(0, payload) + msg[:payload] = ptr + msg[:len] = payload.bytesize + end + if key + ptr = FFI::MemoryPointer.new(:char, key.bytesize) + ptr.put_bytes(0, key) + msg[:key] = ptr + msg[:key_len] = key.bytesize + end + end + end + + def build_message(payload: @payload, key: @key) + native_msg = build_native_message(payload: payload, key: key) + # Mock headers to avoid segfault when reading headers from a message created from scratch + Rdkafka::Bindings.stubs(:rd_kafka_message_headers).returns(Rdkafka::Bindings::RD_KAFKA_RESP_ERR__NOENT) + described_class.new(native_msg) + end + + it "has a topic" do + message = build_message + assert_equal "topic_name", message.topic + end + + it "has a partition" do + message = build_message + assert_equal 3, message.partition + end + + context "payload" do + it "has a nil payload when none is present" do + message = build_message + assert_nil message.payload + end + + context "present payload" do + it "has a payload" do + message = build_message(payload: "payload content") + assert_equal "payload content", message.payload + end + end + end + + context "key" do + it "has a nil key when none is present" do + message = build_message + assert_nil message.key + end + + context "present key" do + it "has a key" do + message = build_message(key: "key content") + assert_equal "key content", message.key + end + end + end + + it "has an offset" do + message = build_message + assert_equal 100, message.offset + end + + describe "#timestamp" do + context "without a timestamp" do + it "has a nil timestamp if not present" do + Rdkafka::Bindings.stubs(:rd_kafka_message_timestamp).returns(-1) + message = build_message + assert_nil message.timestamp + end + end + + context "with a timestamp" do + it "has timestamp if present" do + Rdkafka::Bindings.stubs(:rd_kafka_message_timestamp).returns(1505069646250) + message = build_message + assert_equal Time.at(1505069646, 250_000), message.timestamp + end + end + end + 
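+  # For reference (not part of the assertions): rd_kafka_message_timestamp
+  # reports milliseconds since the epoch, so the stubbed value above converts
+  # as follows:
+  #
+  #   ms = 1505069646250
+  #   Time.at(ms / 1000, (ms % 1000) * 1000) # => Time.at(1505069646, 250_000)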
+ describe "#to_s" do + it "has a human readable representation" do + message = build_message + message.stubs(:timestamp).returns(1000) + assert_equal "", message.to_s + end + + context "with key and payload" do + it "has a human readable representation" do + message = build_message(key: "key", payload: "payload") + message.stubs(:timestamp).returns(1000) + assert_equal "", message.to_s + end + end + + context "with a very long key and payload" do + it "has a human readable representation" do + message = build_message(key: "k" * 100_000, payload: "p" * 100_000) + message.stubs(:timestamp).returns(1000) + assert_equal "", message.to_s + end + end + end +end diff --git a/test/lib/rdkafka/consumer/partition_test.rb b/test/lib/rdkafka/consumer/partition_test.rb new file mode 100644 index 00000000..eedac227 --- /dev/null +++ b/test/lib/rdkafka/consumer/partition_test.rb @@ -0,0 +1,65 @@ +# frozen_string_literal: true + +require_relative "../../../test_helper" + +describe Rdkafka::Consumer::Partition do + before do + @offset = 100 + @err = 0 + @partition = described_class.new(1, @offset, @err) + end + + it "has a partition" do + assert_equal 1, @partition.partition + end + + it "has an offset" do + assert_equal 100, @partition.offset + end + + it "has an err code" do + assert_equal 0, @partition.err + end + + describe "#to_s" do + it "returns a human readable representation" do + assert_equal "", @partition.to_s + end + end + + describe "#inspect" do + it "returns a human readable representation" do + assert_equal "", @partition.to_s + end + + context "without offset" do + before do + @partition_no_offset = described_class.new(1, nil, 0) + end + + it "returns a human readable representation" do + assert_equal "", @partition_no_offset.to_s + end + end + + context "with err code" do + before do + @partition_with_err = described_class.new(1, 100, 1) + end + + it "returns a human readable representation" do + assert_equal "", @partition_with_err.to_s + end + end + end + + describe "#==" do + it "equals another partition with the same content" do + assert_equal described_class.new(1, 100), @partition + end + + it "does not equal another partition with different content" do + refute_equal described_class.new(2, 101), @partition + end + end +end diff --git a/spec/lib/rdkafka/consumer/topic_partition_list_spec.rb b/test/lib/rdkafka/consumer/topic_partition_list_test.rb similarity index 62% rename from spec/lib/rdkafka/consumer/topic_partition_list_spec.rb rename to test/lib/rdkafka/consumer/topic_partition_list_test.rb index 7ceaca59..817ea0f6 100644 --- a/spec/lib/rdkafka/consumer/topic_partition_list_spec.rb +++ b/test/lib/rdkafka/consumer/topic_partition_list_test.rb @@ -1,151 +1,151 @@ # frozen_string_literal: true -RSpec.describe Rdkafka::Consumer::TopicPartitionList do +require_relative "../../../test_helper" + +describe Rdkafka::Consumer::TopicPartitionList do it "creates a new list and add unassigned topics" do list = described_class.new - expect(list.count).to eq 0 - expect(list.empty?).to be true + assert_equal 0, list.count + assert_equal true, list.empty? list.add_topic("topic1") list.add_topic("topic2") - expect(list.count).to eq 2 - expect(list.empty?).to be false + assert_equal 2, list.count + assert_equal false, list.empty? 
hash = list.to_h - expect(hash.count).to eq 2 - expect(hash).to eq({ + assert_equal 2, hash.count + assert_equal({ "topic1" => nil, "topic2" => nil - }) + }, hash) end it "creates a new list and add assigned topics as a range" do list = described_class.new - expect(list.count).to eq 0 - expect(list.empty?).to be true + assert_equal 0, list.count + assert_equal true, list.empty? list.add_topic("topic1", 0..2) list.add_topic("topic2", 0..1) - expect(list.count).to eq 5 - expect(list.empty?).to be false + assert_equal 5, list.count + assert_equal false, list.empty? hash = list.to_h - expect(hash.count).to eq 2 - expect(hash["topic1"]).to eq([ + assert_equal 2, hash.count + assert_equal([ Rdkafka::Consumer::Partition.new(0, nil), Rdkafka::Consumer::Partition.new(1, nil), Rdkafka::Consumer::Partition.new(2, nil) - ]) - expect(hash["topic2"]).to eq([ + ], hash["topic1"]) + assert_equal([ Rdkafka::Consumer::Partition.new(0, nil), Rdkafka::Consumer::Partition.new(1, nil) - ]) + ], hash["topic2"]) end it "creates a new list and add assigned topics as an array" do list = described_class.new - expect(list.count).to eq 0 - expect(list.empty?).to be true + assert_equal 0, list.count + assert_equal true, list.empty? list.add_topic("topic1", [0, 1, 2]) list.add_topic("topic2", [0, 1]) - expect(list.count).to eq 5 - expect(list.empty?).to be false + assert_equal 5, list.count + assert_equal false, list.empty? hash = list.to_h - expect(hash.count).to eq 2 - expect(hash["topic1"]).to eq([ + assert_equal 2, hash.count + assert_equal([ Rdkafka::Consumer::Partition.new(0, nil), Rdkafka::Consumer::Partition.new(1, nil), Rdkafka::Consumer::Partition.new(2, nil) - ]) - expect(hash["topic2"]).to eq([ + ], hash["topic1"]) + assert_equal([ Rdkafka::Consumer::Partition.new(0, nil), Rdkafka::Consumer::Partition.new(1, nil) - ]) + ], hash["topic2"]) end it "creates a new list and add assigned topics as a count" do list = described_class.new - expect(list.count).to eq 0 - expect(list.empty?).to be true + assert_equal 0, list.count + assert_equal true, list.empty? list.add_topic("topic1", 3) list.add_topic("topic2", 2) - expect(list.count).to eq 5 - expect(list.empty?).to be false + assert_equal 5, list.count + assert_equal false, list.empty? hash = list.to_h - expect(hash.count).to eq 2 - expect(hash["topic1"]).to eq([ + assert_equal 2, hash.count + assert_equal([ Rdkafka::Consumer::Partition.new(0, nil), Rdkafka::Consumer::Partition.new(1, nil), Rdkafka::Consumer::Partition.new(2, nil) - ]) - expect(hash["topic2"]).to eq([ + ], hash["topic1"]) + assert_equal([ Rdkafka::Consumer::Partition.new(0, nil), Rdkafka::Consumer::Partition.new(1, nil) - ]) + ], hash["topic2"]) end it "creates a new list and add topics and partitions with an offset" do list = described_class.new - expect(list.count).to eq 0 - expect(list.empty?).to be true + assert_equal 0, list.count + assert_equal true, list.empty? 
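+    # add_topic_and_partitions_with_offsets takes a hash of partition => offset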
list.add_topic_and_partitions_with_offsets("topic1", 0 => 5, 1 => 6, 2 => 7) hash = list.to_h - expect(hash.count).to eq 1 - expect(hash["topic1"]).to eq([ + assert_equal 1, hash.count + assert_equal([ Rdkafka::Consumer::Partition.new(0, 5), Rdkafka::Consumer::Partition.new(1, 6), Rdkafka::Consumer::Partition.new(2, 7) - ]) + ], hash["topic1"]) end describe "#to_s" do - let(:expected) do - if RUBY_VERSION >= "3.4.0" + it "returns a human readable representation" do + expected = if RUBY_VERSION >= "3.4.0" " [, ]}>" else "[, ]}>" end - end - it "returns a human readable representation" do list = described_class.new list.add_topic("topic1", [0, 1]) - expect(list.to_s).to eq expected + assert_equal expected, list.to_s end end describe "#==" do - let(:list) do - described_class.new.tap do |list| - list.add_topic("topic1", [0]) + before do + @list = described_class.new.tap do |l| + l.add_topic("topic1", [0]) end end it "equals another partition with the same content" do - other = described_class.new.tap do |list| - list.add_topic("topic1", [0]) + other = described_class.new.tap do |l| + l.add_topic("topic1", [0]) end - expect(list).to eq other + assert_equal other, @list end it "does not equal another partition with different content" do - expect(list).not_to eq described_class.new + refute_equal described_class.new, @list end end @@ -159,11 +159,11 @@ ) list = described_class.from_native_tpl(pointer) - other = described_class.new.tap do |list| - list.add_topic("topic") + other = described_class.new.tap do |l| + l.add_topic("topic") end - expect(list).to eq other + assert_equal other, list end it "creates a list from an existing native list with offsets" do @@ -181,60 +181,60 @@ ) list = described_class.from_native_tpl(pointer) - other = described_class.new.tap do |list| - list.add_topic_and_partitions_with_offsets("topic", 0 => 100) + other = described_class.new.tap do |l| + l.add_topic_and_partitions_with_offsets("topic", 0 => 100) end - expect(list).to eq other + assert_equal other, list end end describe "#to_native_tpl" do it "creates a native list" do - list = described_class.new.tap do |list| - list.add_topic("topic") + list = described_class.new.tap do |l| + l.add_topic("topic") end tpl = list.to_native_tpl other = described_class.from_native_tpl(tpl) - expect(list).to eq other + assert_equal other, list end it "creates a native list with partitions" do - list = described_class.new.tap do |list| - list.add_topic("topic", 0..16) + list = described_class.new.tap do |l| + l.add_topic("topic", 0..16) end tpl = list.to_native_tpl other = described_class.from_native_tpl(tpl) - expect(list).to eq other + assert_equal other, list end it "creates a native list with offsets" do - list = described_class.new.tap do |list| - list.add_topic_and_partitions_with_offsets("topic", 0 => 100) + list = described_class.new.tap do |l| + l.add_topic_and_partitions_with_offsets("topic", 0 => 100) end tpl = list.to_native_tpl other = described_class.from_native_tpl(tpl) - expect(list).to eq other + assert_equal other, list end it "creates a native list with timetamp offsets if offsets are Time" do - list = described_class.new.tap do |list| - list.add_topic_and_partitions_with_offsets("topic", 0 => Time.at(1505069646, 250_000)) + list = described_class.new.tap do |l| + l.add_topic_and_partitions_with_offsets("topic", 0 => Time.at(1505069646, 250_000)) end tpl = list.to_native_tpl - compare_list = described_class.new.tap do |list| - list.add_topic_and_partitions_with_offsets( + compare_list = described_class.new.tap 
do |l| + l.add_topic_and_partitions_with_offsets( "topic", 0 => (Time.at(1505069646, 250_000).to_f * 1000).floor ) @@ -242,7 +242,7 @@ native_list = described_class.from_native_tpl(tpl) - expect(native_list).to eq compare_list + assert_equal compare_list, native_list end end end diff --git a/spec/lib/rdkafka/consumer_spec.rb b/test/lib/rdkafka/consumer_test.rb similarity index 66% rename from spec/lib/rdkafka/consumer_spec.rb rename to test/lib/rdkafka/consumer_test.rb index cf4b01a7..30859615 100644 --- a/spec/lib/rdkafka/consumer_spec.rb +++ b/test/lib/rdkafka/consumer_test.rb @@ -3,117 +3,129 @@ require "ostruct" require "securerandom" -RSpec.describe Rdkafka::Consumer do - let(:consumer) { rdkafka_consumer_config.consumer } - let(:producer) { rdkafka_producer_config.producer } - let(:topic) { TestTopics.create } +require_relative "../../test_helper" - after { +describe Rdkafka::Consumer do + def consumer + @consumer ||= rdkafka_consumer_config.consumer + end + + def producer + @producer ||= rdkafka_producer_config.producer + end + + def topic + @topic ||= TestTopics.create + end + + after do consumer.close producer.close - } + end describe "#name" do - it { expect(consumer.name).to include("rdkafka#consumer-") } + it "includes rdkafka#consumer-" do + assert_includes consumer.name, "rdkafka#consumer-" + end end describe "consumer without auto-start" do - let(:consumer) { rdkafka_consumer_config.consumer(native_kafka_auto_start: false) } - it "expect to be able to start it later and close" do + @consumer = rdkafka_consumer_config.consumer(native_kafka_auto_start: false) consumer.start consumer.close end it "expect to be able to close it without starting" do + @consumer = rdkafka_consumer_config.consumer(native_kafka_auto_start: false) consumer.close end end describe "#subscribe, #unsubscribe and #subscription" do it "subscribes, unsubscribes and returns the subscription" do - expect(consumer.subscription).to be_empty + assert_empty consumer.subscription consumer.subscribe(topic) - expect(consumer.subscription).not_to be_empty + refute_empty consumer.subscription expected_subscription = Rdkafka::Consumer::TopicPartitionList.new.tap do |list| list.add_topic(topic) end - expect(consumer.subscription).to eq expected_subscription + assert_equal expected_subscription, consumer.subscription consumer.unsubscribe - expect(consumer.subscription).to be_empty + assert_empty consumer.subscription end it "raises an error when subscribing fails" do - expect(Rdkafka::Bindings).to receive(:rd_kafka_subscribe).and_return(20) + Rdkafka::Bindings.expects(:rd_kafka_subscribe).returns(20) - expect { + assert_raises(Rdkafka::RdkafkaError) do consumer.subscribe(topic) - }.to raise_error(Rdkafka::RdkafkaError) + end end it "raises an error when unsubscribing fails" do - expect(Rdkafka::Bindings).to receive(:rd_kafka_unsubscribe).and_return(20) + Rdkafka::Bindings.expects(:rd_kafka_unsubscribe).returns(20) - expect { + assert_raises(Rdkafka::RdkafkaError) do consumer.unsubscribe - }.to raise_error(Rdkafka::RdkafkaError) + end end it "raises an error when fetching the subscription fails" do - expect(Rdkafka::Bindings).to receive(:rd_kafka_subscription).and_return(20) + Rdkafka::Bindings.expects(:rd_kafka_subscription).returns(20) - expect { + assert_raises(Rdkafka::RdkafkaError) do consumer.subscription - }.to raise_error(Rdkafka::RdkafkaError) + end end context "when using consumer without the poll set" do - let(:consumer) do + it "subscribes, unsubscribes and returns the subscription" do config = 
rdkafka_consumer_config config.consumer_poll_set = false - config.consumer - end + @consumer = config.consumer - it "subscribe,s unsubscribe and return the subscription" do - expect(consumer.subscription).to be_empty + assert_empty consumer.subscription consumer.subscribe(topic) - expect(consumer.subscription).not_to be_empty + refute_empty consumer.subscription expected_subscription = Rdkafka::Consumer::TopicPartitionList.new.tap do |list| list.add_topic(topic) end - expect(consumer.subscription).to eq expected_subscription + assert_equal expected_subscription, consumer.subscription consumer.unsubscribe - expect(consumer.subscription).to be_empty + assert_empty consumer.subscription end end end describe "#pause and #resume" do context "subscription" do - let(:timeout) { 2000 } + before do + @timeout = 2000 + consumer.subscribe(topic) + end - before { consumer.subscribe(topic) } after { consumer.unsubscribe } it "pauses and then resumes" do # 1. partitions are assigned wait_for_assignment(consumer) - expect(consumer.assignment).not_to be_empty + refute_empty consumer.assignment # 2. send a first message send_one_message # 3. ensure that message is successfully consumed - records = consumer.poll(timeout) - expect(records).not_to be_nil + records = consumer.poll(@timeout) + refute_nil records consumer.commit # 4. send a second message @@ -125,8 +137,8 @@ consumer.pause(tpl) # 6. ensure that messages are not available - records = consumer.poll(timeout) - expect(records).to be_nil + records = consumer.poll(@timeout) + assert_nil records # 7. resume the subscription tpl = Rdkafka::Consumer::TopicPartitionList.new @@ -134,85 +146,83 @@ consumer.resume(tpl) # 8. ensure that message is successfully consumed - records = consumer.poll(timeout) - expect(records).not_to be_nil + records = consumer.poll(@timeout) + refute_nil records + end + + def send_one_message + producer.produce( + topic: topic, + payload: "payload 1", + key: "key 1" + ).wait end end it "raises when not TopicPartitionList" do - expect { consumer.pause(true) }.to raise_error(TypeError) - expect { consumer.resume(true) }.to raise_error(TypeError) + assert_raises(TypeError) { consumer.pause(true) } + assert_raises(TypeError) { consumer.resume(true) } end it "raises an error when pausing fails" do list = Rdkafka::Consumer::TopicPartitionList.new.tap { |tpl| tpl.add_topic("topic", 0..1) } - expect(Rdkafka::Bindings).to receive(:rd_kafka_pause_partitions).and_return(20) - expect { + Rdkafka::Bindings.expects(:rd_kafka_pause_partitions).returns(20) + err = assert_raises(Rdkafka::RdkafkaTopicPartitionListError) do consumer.pause(list) - }.to raise_error do |err| - expect(err).to be_instance_of(Rdkafka::RdkafkaTopicPartitionListError) - expect(err.topic_partition_list).to be end + assert_kind_of Rdkafka::RdkafkaTopicPartitionListError, err + refute_nil err.topic_partition_list end it "raises an error when resume fails" do - expect(Rdkafka::Bindings).to receive(:rd_kafka_resume_partitions).and_return(20) - expect { + Rdkafka::Bindings.expects(:rd_kafka_resume_partitions).returns(20) + assert_raises(Rdkafka::RdkafkaError) do consumer.resume(Rdkafka::Consumer::TopicPartitionList.new) - }.to raise_error Rdkafka::RdkafkaError - end - - def send_one_message - producer.produce( - topic: topic, - payload: "payload 1", - key: "key 1" - ).wait + end end end describe "#seek" do - let(:topic) { "it-#{SecureRandom.uuid}" } - before do - admin = rdkafka_producer_config.admin - admin.create_topic(topic, 1, 1).wait - wait_for_topic(admin, topic) - 
admin.close + @seek_topic = "it-#{SecureRandom.uuid}" + seek_admin = rdkafka_producer_config.admin + seek_admin.create_topic(@seek_topic, 1, 1).wait + wait_for_topic(seek_admin, @seek_topic) + seek_admin.close end it "raises an error when seeking fails" do - fake_msg = OpenStruct.new(topic: topic, partition: 0, offset: 0) + fake_msg = OpenStruct.new(topic: @seek_topic, partition: 0, offset: 0) - expect(Rdkafka::Bindings).to receive(:rd_kafka_seek).and_return(20) - expect { + Rdkafka::Bindings.expects(:rd_kafka_seek).returns(20) + assert_raises(Rdkafka::RdkafkaError) do consumer.seek(fake_msg) - }.to raise_error Rdkafka::RdkafkaError + end end context "subscription" do - let(:timeout) { 1000 } - # Some specs here test the manual offset commit hence we want to ensure, that we have some - # offsets in-memory that we can manually commit - let(:consumer) { rdkafka_consumer_config("auto.commit.interval.ms": 60_000).consumer } - before do - consumer.subscribe(topic) + @timeout = 1000 + # Some specs here test the manual offset commit, hence we want to ensure that we have some + # offsets in-memory that we can manually commit + @consumer = rdkafka_consumer_config("auto.commit.interval.ms": 60_000).consumer + + consumer.subscribe(@seek_topic) # 1. partitions are assigned wait_for_assignment(consumer) - expect(consumer.assignment).not_to be_empty + refute_empty consumer.assignment # 2. eat unrelated messages - while consumer.poll(timeout) do; end + while consumer.poll(@timeout) do; end end after { consumer.unsubscribe } def send_one_message(val) producer.produce( - topic: topic, + topic: @seek_topic, payload: "payload #{val}", key: "key 1", partition: 0 @@ -222,12 +232,12 @@ def send_one_message(val) it "works when a partition is paused" do # 3. get reference message send_one_message(:a) - message1 = consumer.poll(timeout) - expect(message1&.payload).to eq "payload a" + message1 = consumer.poll(@timeout) + assert_equal "payload a", message1&.payload # 4. pause the subscription tpl = Rdkafka::Consumer::TopicPartitionList.new - tpl.add_topic(topic, 1) + tpl.add_topic(@seek_topic, 1) consumer.pause(tpl) # 5. seek to previous message @@ -235,19 +245,19 @@ def send_one_message(val) # 6. resume the subscription tpl = Rdkafka::Consumer::TopicPartitionList.new - tpl.add_topic(topic, 1) + tpl.add_topic(@seek_topic, 1) consumer.resume(tpl) # 7. ensure same message is read again - message2 = consumer.poll(timeout) + message2 = consumer.poll(@timeout) # This is needed because `enable.auto.offset.store` is true but when running in CI that # is overloaded, offset store lags sleep(1) consumer.commit - expect(message1.offset).to eq message2.offset - expect(message1.payload).to eq message2.payload + assert_equal message1.offset, message2.offset + assert_equal message1.payload, message2.payload end it "allows skipping messages" do @@ -257,8 +267,8 @@ def send_one_message(val) send_one_message(:c) # 4. get reference message - message = consumer.poll(timeout) - expect(message&.payload).to eq "payload a" + message = consumer.poll(@timeout) + assert_equal "payload a", message&.payload # 5. seek over one message fake_msg = message.dup @@ -266,53 +276,53 @@ def send_one_message(val) consumer.seek(fake_msg) # 6. 
ensure that only one message is available - records = consumer.poll(timeout) - expect(records&.payload).to eq "payload c" - records = consumer.poll(timeout) - expect(records).to be_nil + records = consumer.poll(@timeout) + assert_equal "payload c", records&.payload + records = consumer.poll(@timeout) + assert_nil records end end end describe "#seek_by" do - let(:consumer) { rdkafka_consumer_config("auto.commit.interval.ms": 60_000).consumer } - let(:topic) { "it-#{SecureRandom.uuid}" } - let(:partition) { 0 } - let(:offset) { 0 } - before do - admin = rdkafka_producer_config.admin - admin.create_topic(topic, 1, 1).wait - wait_for_topic(admin, topic) - admin.close + @seek_by_topic = "it-#{SecureRandom.uuid}" + @partition = 0 + @offset = 0 + @consumer = rdkafka_consumer_config("auto.commit.interval.ms": 60_000).consumer + + seek_admin = rdkafka_producer_config.admin + seek_admin.create_topic(@seek_by_topic, 1, 1).wait + wait_for_topic(seek_admin, @seek_by_topic) + seek_admin.close end it "raises an error when seeking fails" do - expect(Rdkafka::Bindings).to receive(:rd_kafka_seek).and_return(20) - expect { - consumer.seek_by(topic, partition, offset) - }.to raise_error Rdkafka::RdkafkaError + Rdkafka::Bindings.expects(:rd_kafka_seek).returns(20) + assert_raises(Rdkafka::RdkafkaError) do + consumer.seek_by(@seek_by_topic, @partition, @offset) + end end context "subscription" do - let(:timeout) { 1000 } - before do - consumer.subscribe(topic) + @timeout = 1000 + + consumer.subscribe(@seek_by_topic) # 1. partitions are assigned wait_for_assignment(consumer) - expect(consumer.assignment).not_to be_empty + refute_empty consumer.assignment # 2. eat unrelated messages - while consumer.poll(timeout) do; end + while consumer.poll(@timeout) do; end end after { consumer.unsubscribe } def send_one_message(val) producer.produce( - topic: topic, + topic: @seek_by_topic, payload: "payload #{val}", key: "key 1", partition: 0 @@ -322,12 +332,12 @@ def send_one_message(val) it "works when a partition is paused" do # 3. get reference message send_one_message(:a) - message1 = consumer.poll(timeout) - expect(message1&.payload).to eq "payload a" + message1 = consumer.poll(@timeout) + assert_equal "payload a", message1&.payload # 4. pause the subscription tpl = Rdkafka::Consumer::TopicPartitionList.new - tpl.add_topic(topic, 1) + tpl.add_topic(@seek_by_topic, 1) consumer.pause(tpl) # 5. seek by the previous message fields @@ -335,19 +345,19 @@ def send_one_message(val) # 6. resume the subscription tpl = Rdkafka::Consumer::TopicPartitionList.new - tpl.add_topic(topic, 1) + tpl.add_topic(@seek_by_topic, 1) consumer.resume(tpl) # 7. ensure same message is read again - message2 = consumer.poll(timeout) + message2 = consumer.poll(@timeout) # This is needed because `enable.auto.offset.store` is true but when running in CI that # is overloaded, offset store lags sleep(2) consumer.commit - expect(message1.offset).to eq message2.offset - expect(message1.payload).to eq message2.payload + assert_equal message1.offset, message2.offset + assert_equal message1.payload, message2.payload end it "allows skipping messages" do @@ -357,37 +367,37 @@ def send_one_message(val) send_one_message(:c) # 4. get reference message - message = consumer.poll(timeout) - expect(message&.payload).to eq "payload a" + message = consumer.poll(@timeout) + assert_equal "payload a", message&.payload # 5. seek over one message consumer.seek_by(message.topic, message.partition, message.offset + 2) # 6. 
ensure that only one message is available - records = consumer.poll(timeout) - expect(records&.payload).to eq "payload c" - records = consumer.poll(timeout) - expect(records).to be_nil + records = consumer.poll(@timeout) + assert_equal "payload c", records&.payload + records = consumer.poll(@timeout) + assert_nil records end end end describe "#assign and #assignment" do it "returns an empty assignment if nothing is assigned" do - expect(consumer.assignment).to be_empty + assert_empty consumer.assignment end it "only accepts a topic partition list in assign" do - expect { + assert_raises(TypeError) do consumer.assign("list") - }.to raise_error TypeError + end end it "raises an error when assigning fails" do - expect(Rdkafka::Bindings).to receive(:rd_kafka_assign).and_return(20) - expect { + Rdkafka::Bindings.expects(:rd_kafka_assign).returns(20) + assert_raises(Rdkafka::RdkafkaError) do consumer.assign(Rdkafka::Consumer::TopicPartitionList.new) - }.to raise_error Rdkafka::RdkafkaError + end end it "assigns specific topic/partitions and returns that assignment" do @@ -396,8 +406,8 @@ def send_one_message(val) consumer.assign(tpl) assignment = consumer.assignment - expect(assignment).not_to be_empty - expect(assignment.to_h[topic].length).to eq 3 + refute_empty assignment + assert_equal 3, assignment.to_h[topic].length end it "returns the assignment when subscribed" do @@ -417,15 +427,15 @@ def send_one_message(val) end assignment = consumer.assignment - expect(assignment).not_to be_empty - expect(assignment.to_h[topic].length).to eq 3 + refute_empty assignment + assert_equal 3, assignment.to_h[topic].length end it "raises an error when getting assignment fails" do - expect(Rdkafka::Bindings).to receive(:rd_kafka_assignment).and_return(20) - expect { + Rdkafka::Bindings.expects(:rd_kafka_assignment).returns(20) + assert_raises(Rdkafka::RdkafkaError) do consumer.assignment - }.to raise_error Rdkafka::RdkafkaError + end end end @@ -436,7 +446,7 @@ def send_one_message(val) list.add_topic(topic) end - expect(consumer.assignment_lost?).to be false + assert_equal false, consumer.assignment_lost? consumer.unsubscribe end @@ -447,7 +457,7 @@ def send_one_message(val) end consumer.unsubscribe - expect(consumer.assignment_lost?).to be false + assert_equal false, consumer.assignment_lost? 
end end @@ -463,9 +473,10 @@ def send_one_message(val) ).wait end consumer.close - expect { + e = assert_raises(Rdkafka::ClosedConsumerError) do consumer.poll(100) - }.to raise_error(Rdkafka::ClosedConsumerError, /poll/) + end + assert_match(/poll/, e.message) end context "when there are outgoing operations in other threads" do @@ -489,15 +500,15 @@ def send_one_message(val) close_time = Time.now thread.join - expect(times).to all(be < close_time) + times.each { |time| assert_operator time, :<, close_time } end end end describe "#position, #commit, #committed and #store_offset" do # Make sure there are messages to work with - let!(:report) do - producer.produce( + before do + @report = producer.produce( topic: topic, payload: "payload 1", key: "key 1", @@ -505,33 +516,31 @@ def send_one_message(val) ).wait end - let(:message) do - wait_for_message( + def fetched_message + @fetched_message ||= wait_for_message( topic: topic, - delivery_report: report, + delivery_report: @report, consumer: consumer ) end describe "#position" do it "only accepts a topic partition list in position if not nil" do - expect { + assert_raises(TypeError) do consumer.position("list") - }.to raise_error TypeError + end end end describe "#committed" do it "only accepts a topic partition list in commit if not nil" do - expect { + assert_raises(TypeError) do consumer.commit("list") - }.to raise_error TypeError + end end it "commits in sync mode" do - expect { - consumer.commit(nil, true) - }.not_to raise_error + consumer.commit(nil, true) end end @@ -566,24 +575,24 @@ def send_one_message(val) consumer.commit(list) partitions = consumer.committed(list).to_h[topic] - expect(partitions[0].offset).to eq 1 - expect(partitions[1].offset).to eq 2 - expect(partitions[2].offset).to eq 3 + assert_equal 1, partitions[0].offset + assert_equal 2, partitions[1].offset + assert_equal 3, partitions[2].offset end it "raises an error when committing fails" do - expect(Rdkafka::Bindings).to receive(:rd_kafka_commit).and_return(20) + Rdkafka::Bindings.expects(:rd_kafka_commit).returns(20) - expect { + assert_raises(Rdkafka::RdkafkaError) do consumer.commit - }.to raise_error(Rdkafka::RdkafkaError) + end end describe "#committed" do it "fetches the committed offsets for the current assignment" do partitions = consumer.committed.to_h[topic] - expect(partitions).not_to be_nil - expect(partitions[0].offset).to eq 1 + refute_nil partitions + assert_equal 1, partitions[0].offset end it "fetches the committed offsets for a specified topic partition list" do @@ -591,55 +600,45 @@ def send_one_message(val) list.add_topic(topic, [0, 1, 2]) end partitions = consumer.committed(list).to_h[topic] - expect(partitions).not_to be_nil - expect(partitions[0].offset).to eq 1 - expect(partitions[1].offset).to eq 1 - expect(partitions[2].offset).to eq 1 + refute_nil partitions + assert_equal 1, partitions[0].offset + assert_equal 1, partitions[1].offset + assert_equal 1, partitions[2].offset end it "raises an error when getting committed fails" do - expect(Rdkafka::Bindings).to receive(:rd_kafka_committed).and_return(20) + Rdkafka::Bindings.expects(:rd_kafka_committed).returns(20) list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list| list.add_topic(topic, [0, 1, 2]) end - expect { + assert_raises(Rdkafka::RdkafkaError) do consumer.committed(list) - }.to raise_error Rdkafka::RdkafkaError + end end end describe "#store_offset" do - let(:consumer) { rdkafka_consumer_config("enable.auto.offset.store": false).consumer } - let(:metadata) { 
SecureRandom.uuid } - let(:group_id) { SecureRandom.uuid } - let(:base_config) do - { - "group.id": group_id, + before do + @group_id = SecureRandom.uuid + @base_config = { + "group.id": @group_id, "enable.auto.offset.store": false, "enable.auto.commit": false } - end - # Produce a fresh message and consume it with a dedicated consumer - # to avoid conflicts with the committed offsets from the parent context - let(:store_offset_report) do - producer.produce( + @store_offset_report = producer.produce( topic: topic, payload: "payload store_offset", key: "key store_offset", partition: 0 ).wait - end - let(:message) do - wait_for_message( + @store_message = wait_for_message( topic: topic, - delivery_report: store_offset_report + delivery_report: @store_offset_report ) - end - before do - @new_consumer = rdkafka_consumer_config(base_config).consumer + @new_consumer = rdkafka_consumer_config(@base_config).consumer @new_consumer.subscribe(topic) wait_for_assignment(@new_consumer) end @@ -649,76 +648,92 @@ def send_one_message(val) end it "stores the offset for a message" do - @new_consumer.store_offset(message) + @new_consumer.store_offset(@store_message) @new_consumer.commit - # TODO use position here, should be at offset - list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list| list.add_topic(topic, [0, 1, 2]) end partitions = @new_consumer.committed(list).to_h[topic] - expect(partitions).not_to be_nil - expect(partitions[message.partition].offset).to eq(message.offset + 1) + refute_nil partitions + assert_equal @store_message.offset + 1, partitions[@store_message.partition].offset end it "raises an error with invalid input" do - allow(message).to receive(:partition).and_return(9999) - expect { - @new_consumer.store_offset(message) - }.to raise_error Rdkafka::RdkafkaError + @store_message.stubs(:partition).returns(9999) + assert_raises(Rdkafka::RdkafkaError) do + @new_consumer.store_offset(@store_message) + end end describe "#position" do - let(:polled_message) do + it "fetches the positions for the current assignment" do # consumer must poll the message directly (not via a separate consumer) # for position to reflect the fetch offset + @consumer = rdkafka_consumer_config("enable.auto.offset.store": false).consumer report = producer.produce( topic: topic, payload: "payload position", key: "key position", partition: 0 ).wait - wait_for_message( + polled_message = wait_for_message( topic: topic, delivery_report: report, consumer: consumer ) - end - it "fetches the positions for the current assignment" do consumer.store_offset(polled_message) partitions = consumer.position.to_h[topic] - expect(partitions).not_to be_nil - expect(partitions[0].offset).to eq polled_message.offset + 1 + refute_nil partitions + assert_equal polled_message.offset + 1, partitions[0].offset end it "fetches the positions for a specified assignment" do + @consumer = rdkafka_consumer_config("enable.auto.offset.store": false).consumer + report = producer.produce( + topic: topic, + payload: "payload position", + key: "key position", + partition: 0 + ).wait + polled_message = wait_for_message( + topic: topic, + delivery_report: report, + consumer: consumer + ) + consumer.store_offset(polled_message) list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list| list.add_topic_and_partitions_with_offsets(topic, 0 => nil, 1 => nil, 2 => nil) end partitions = consumer.position(list).to_h[topic] - expect(partitions).not_to be_nil - expect(partitions[0].offset).to eq polled_message.offset + 1 + refute_nil partitions + 
assert_equal polled_message.offset + 1, partitions[0].offset end it "raises an error when getting the position fails" do - expect(Rdkafka::Bindings).to receive(:rd_kafka_position).and_return(20) + Rdkafka::Bindings.expects(:rd_kafka_position).returns(20) - expect { + assert_raises(Rdkafka::RdkafkaError) do consumer.position - }.to raise_error(Rdkafka::RdkafkaError) + end end end context "when trying to use with enable.auto.offset.store set to true" do - let(:consumer) { rdkafka_consumer_config("enable.auto.offset.store": true).consumer } - it "expect to raise invalid configuration error" do - expect { consumer.store_offset(message) }.to raise_error(Rdkafka::RdkafkaError, /invalid_arg/) + auto_store_consumer = rdkafka_consumer_config("enable.auto.offset.store": true).consumer + begin + e = assert_raises(Rdkafka::RdkafkaError) do + auto_store_consumer.store_offset(@store_message) + end + assert_match(/invalid_arg/, e.message) + ensure + auto_store_consumer.close + end end end end @@ -736,22 +751,22 @@ def send_one_message(val) ).wait low, high = consumer.query_watermark_offsets(topic, 0, 5000) - expect(low).to eq 0 - expect(high).to be > 0 + assert_equal 0, low + assert_operator high, :>, 0 end it "raises an error when querying offsets fails" do - expect(Rdkafka::Bindings).to receive(:rd_kafka_query_watermark_offsets).and_return(20) - expect { + Rdkafka::Bindings.expects(:rd_kafka_query_watermark_offsets).returns(20) + assert_raises(Rdkafka::RdkafkaError) do consumer.query_watermark_offsets(topic, 0, 5000) - }.to raise_error Rdkafka::RdkafkaError + end end end describe "#lag" do - let(:consumer) { rdkafka_consumer_config("enable.partition.eof": true).consumer } - it "calculates the consumer lag" do + @consumer = rdkafka_consumer_config("enable.partition.eof": true).consumer + # Make sure there's a message in every partition and # wait for the message to make sure everything is committed. (0..2).each do |i| @@ -777,8 +792,7 @@ def send_one_message(val) # Commit consumer.commit - # Create list to fetch lag for. TODO creating the list will not be necessary - # after committed uses the subscription. + # Create list to fetch lag for. 
list = consumer.committed(Rdkafka::Consumer::TopicPartitionList.new.tap do |l| l.add_topic(topic, 0..2) end) @@ -792,7 +806,7 @@ def send_one_message(val) 2 => 0 } } - expect(lag).to eq(expected_lag) + assert_equal expected_lag, lag # Produce message on every topic again (0..2).each do |i| @@ -812,10 +826,12 @@ def send_one_message(val) 2 => 1 } } - expect(lag).to eq(expected_lag) + assert_equal expected_lag, lag end it "returns nil if there are no messages on the topic" do + @consumer = rdkafka_consumer_config("enable.partition.eof": true).consumer + # Subscribe first to establish a group coordinator, otherwise # committed() can fail with not_coordinator in random test order consumer.subscribe(topic) @@ -829,7 +845,7 @@ def send_one_message(val) expected_lag = { topic => {} } - expect(lag).to eq(expected_lag) + assert_equal expected_lag, lag end end @@ -837,7 +853,7 @@ def send_one_message(val) it "returns the current ClusterId" do consumer.subscribe(topic) wait_for_assignment(consumer) - expect(consumer.cluster_id).not_to be_empty + refute_empty consumer.cluster_id end end @@ -845,34 +861,34 @@ def send_one_message(val) it "returns the current MemberId" do consumer.subscribe(topic) wait_for_assignment(consumer) - expect(consumer.member_id).to start_with("rdkafka-") + assert_match(/\Ardkafka-/, consumer.member_id) end end describe "#poll" do it "returns nil if there is no subscription" do - expect(consumer.poll(1000)).to be_nil + assert_nil consumer.poll(1000) end it "returns nil if there are no messages" do consumer.subscribe(topic) - expect(consumer.poll(1000)).to be_nil + assert_nil consumer.poll(1000) end it "returns a message if there is one" do - topic = "it-#{SecureRandom.uuid}" + poll_topic = "it-#{SecureRandom.uuid}" producer.produce( - topic: topic, + topic: poll_topic, payload: "payload 1", key: "key 1" ).wait - consumer.subscribe(topic) + consumer.subscribe(poll_topic) message = consumer.each { |m| break m } - expect(message).to be_a Rdkafka::Consumer::Message - expect(message.payload).to eq("payload 1") - expect(message.key).to eq("key 1") + assert_kind_of Rdkafka::Consumer::Message, message + assert_equal "payload 1", message.payload + assert_equal "key 1", message.key end it "raises an error when polling fails" do @@ -880,40 +896,40 @@ def send_one_message(val) message[:err] = 20 end message_pointer = message.to_ptr - expect(Rdkafka::Bindings).to receive(:rd_kafka_consumer_poll).and_return(message_pointer) - expect(Rdkafka::Bindings).to receive(:rd_kafka_message_destroy).with(message_pointer) - expect { + Rdkafka::Bindings.expects(:rd_kafka_consumer_poll).returns(message_pointer) + Rdkafka::Bindings.expects(:rd_kafka_message_destroy).with(message_pointer) + assert_raises(Rdkafka::RdkafkaError) do consumer.poll(100) - }.to raise_error Rdkafka::RdkafkaError + end end end describe "#poll_nb" do it "returns nil if there is no subscription" do - expect(consumer.poll_nb).to be_nil + assert_nil consumer.poll_nb end it "returns nil if there are no messages" do consumer.subscribe(topic) - expect(consumer.poll_nb).to be_nil + assert_nil consumer.poll_nb end it "accepts a timeout parameter" do consumer.subscribe(topic) - expect(consumer.poll_nb(0)).to be_nil - expect(consumer.poll_nb(100)).to be_nil + assert_nil consumer.poll_nb(0) + assert_nil consumer.poll_nb(100) end it "returns a message if there is one" do - topic = "it-#{SecureRandom.uuid}" + poll_nb_topic = "it-#{SecureRandom.uuid}" producer.produce( - topic: topic, + topic: poll_nb_topic, payload: "payload poll_nb", key: "key 
poll_nb" ).wait - consumer.subscribe(topic) + consumer.subscribe(poll_nb_topic) wait_for_assignment(consumer) # Give time for message to arrive @@ -926,9 +942,9 @@ def send_one_message(val) sleep 0.1 end - expect(message).to be_a Rdkafka::Consumer::Message - expect(message.payload).to eq("payload poll_nb") - expect(message.key).to eq("key poll_nb") + assert_kind_of Rdkafka::Consumer::Message, message + assert_equal "payload poll_nb", message.payload + assert_equal "key poll_nb", message.key end it "raises an error when polling fails" do @@ -936,18 +952,21 @@ def send_one_message(val) message[:err] = 20 end message_pointer = message.to_ptr - expect(Rdkafka::Bindings).to receive(:rd_kafka_consumer_poll_nb).and_return(message_pointer) - expect(Rdkafka::Bindings).to receive(:rd_kafka_message_destroy).with(message_pointer) - expect { + Rdkafka::Bindings.expects(:rd_kafka_consumer_poll_nb).returns(message_pointer) + Rdkafka::Bindings.expects(:rd_kafka_message_destroy).with(message_pointer) + assert_raises(Rdkafka::RdkafkaError) do consumer.poll_nb - }.to raise_error Rdkafka::RdkafkaError + end end context "when consumer is closed" do before { consumer.close } it "raises ClosedConsumerError" do - expect { consumer.poll_nb }.to raise_error(Rdkafka::ClosedConsumerError, /poll_nb/) + e = assert_raises(Rdkafka::ClosedConsumerError) do + consumer.poll_nb + end + assert_match(/poll_nb/, e.message) end end end @@ -961,9 +980,10 @@ def send_one_message(val) ).wait message = wait_for_message(topic: topic, consumer: consumer, delivery_report: report) - expect(message).to be - expect(message.key).to eq("key headers") - expect(message.headers).to include("foo" => "bar") + refute_nil message + assert_equal "key headers", message.key + assert_includes message.headers, "foo" + assert_equal "bar", message.headers["foo"] end it "returns message with headers using string keys (when produced with string keys)" do @@ -974,9 +994,10 @@ def send_one_message(val) ).wait message = wait_for_message(topic: topic, consumer: consumer, delivery_report: report) - expect(message).to be - expect(message.key).to eq("key headers") - expect(message.headers).to include("foo" => "bar") + refute_nil message + assert_equal "key headers", message.key + assert_includes message.headers, "foo" + assert_equal "bar", message.headers["foo"] end it "returns message with no headers" do @@ -987,13 +1008,13 @@ def send_one_message(val) ).wait message = wait_for_message(topic: topic, consumer: consumer, delivery_report: report) - expect(message).to be - expect(message.key).to eq("key no headers") - expect(message.headers).to be_empty + refute_nil message + assert_equal "key no headers", message.key + assert_empty message.headers end it "raises an error when message headers aren't readable" do - expect(Rdkafka::Bindings).to receive(:rd_kafka_message_headers).with(any_args).and_return(1) + Rdkafka::Bindings.expects(:rd_kafka_message_headers).with(anything, anything).returns(1) report = producer.produce( topic: topic, @@ -1001,16 +1022,15 @@ def send_one_message(val) headers: nil ).wait - expect { + err = assert_raises(Rdkafka::RdkafkaError) do wait_for_message(topic: topic, consumer: consumer, delivery_report: report) - }.to raise_error do |err| - expect(err).to be_instance_of(Rdkafka::RdkafkaError) - expect(err.message).to start_with("Error reading message headers") end + assert_kind_of Rdkafka::RdkafkaError, err + assert_match(/\AError reading message headers/, err.message) end it "raises an error when the first message header isn't readable" do 
- expect(Rdkafka::Bindings).to receive(:rd_kafka_header_get_all).with(any_args).and_return(1) + Rdkafka::Bindings.expects(:rd_kafka_header_get_all).with(anything, anything, anything, anything, anything).returns(1) report = producer.produce( topic: topic, @@ -1018,12 +1038,11 @@ def send_one_message(val) headers: { foo: "bar" } ).wait - expect { + err = assert_raises(Rdkafka::RdkafkaError) do wait_for_message(topic: topic, consumer: consumer, delivery_report: report) - }.to raise_error do |err| - expect(err).to be_instance_of(Rdkafka::RdkafkaError) - expect(err.message).to start_with("Error reading a message header at index 0") end + assert_kind_of Rdkafka::RdkafkaError, err + assert_match(/\AError reading a message header at index 0/, err.message) end end @@ -1044,7 +1063,7 @@ def send_one_message(val) # Check the first 10 messages. Then close the consumer, which # should break the each loop. consumer.each_with_index do |message, i| - expect(message).to be_a Rdkafka::Consumer::Message + assert_kind_of Rdkafka::Consumer::Message, message break if i == 9 end consumer.close @@ -1053,37 +1072,36 @@ def send_one_message(val) describe "#each_batch" do it "expect to raise an error" do - expect do + assert_raises(NotImplementedError) do consumer.each_batch {} - end.to raise_error(NotImplementedError) + end end end describe "#offsets_for_times" do it "raises when not TopicPartitionList" do - expect { consumer.offsets_for_times([]) }.to raise_error(TypeError) + assert_raises(TypeError) { consumer.offsets_for_times([]) } end it "raises an error when offsets_for_times fails" do tpl = Rdkafka::Consumer::TopicPartitionList.new - expect(Rdkafka::Bindings).to receive(:rd_kafka_offsets_for_times).and_return(7) + Rdkafka::Bindings.expects(:rd_kafka_offsets_for_times).returns(7) - expect { consumer.offsets_for_times(tpl) }.to raise_error(Rdkafka::RdkafkaError) + assert_raises(Rdkafka::RdkafkaError) { consumer.offsets_for_times(tpl) } end context "when subscribed" do - let(:timeout) { 1000 } - before do + @timeout = 1000 consumer.subscribe(topic) # 1. partitions are assigned wait_for_assignment(consumer) - expect(consumer.assignment).not_to be_empty + refute_empty consumer.assignment # 2. 
eat unrelated messages - while consumer.poll(timeout) do; end + while consumer.poll(@timeout) do; end end after { consumer.unsubscribe } @@ -1102,9 +1120,9 @@ def send_one_message(val) send_one_message("b") send_one_message("c") - consumer.poll(timeout) - message = consumer.poll(timeout) - consumer.poll(timeout) + consumer.poll(@timeout) + message = consumer.poll(@timeout) + consumer.poll(@timeout) tpl = Rdkafka::Consumer::TopicPartitionList.new.tap do |list| list.add_topic_and_partitions_with_offsets( @@ -1117,93 +1135,86 @@ def send_one_message(val) tpl_response = consumer.offsets_for_times(tpl) - expect(tpl_response.to_h[topic][0].offset).to eq message.offset + assert_equal message.offset, tpl_response.to_h[topic][0].offset end end end # Only relevant in case of a consumer with separate queues describe "#events_poll" do - let(:stats) { [] } - let(:consumer) do + it "expect to run events_poll, operate and propagate stats on events_poll and not poll" do + stats = [] + Rdkafka::Config.statistics_callback = ->(published) { stats << published } + config = rdkafka_consumer_config("statistics.interval.ms": 500) config.consumer_poll_set = false - config.consumer - end - - before { Rdkafka::Config.statistics_callback = ->(published) { stats << published } } + @consumer = config.consumer - after { Rdkafka::Config.statistics_callback = nil } - - it "expect to run events_poll, operate and propagate stats on events_poll and not poll" do consumer.subscribe(topic) consumer.poll(1_000) - expect(stats).to be_empty + assert_empty stats consumer.events_poll(-1) - expect(stats).not_to be_empty + refute_empty stats + ensure + Rdkafka::Config.statistics_callback = nil end end # Only relevant in case of a consumer with separate queues describe "#events_poll_nb" do - let(:stats) { [] } - let(:consumer) do + before do + @stats = [] + Rdkafka::Config.statistics_callback = ->(published) { @stats << published } + config = rdkafka_consumer_config("statistics.interval.ms": 500) config.consumer_poll_set = false - config.consumer + @consumer = config.consumer end - before { Rdkafka::Config.statistics_callback = ->(published) { stats << published } } - after { Rdkafka::Config.statistics_callback = nil } it "returns the number of events processed" do consumer.subscribe(topic) result = consumer.events_poll_nb - expect(result).to be_a(Integer) - expect(result).to be >= 0 + assert_kind_of Integer, result + assert_operator result, :>=, 0 end it "accepts a timeout parameter" do consumer.subscribe(topic) - expect(consumer.events_poll_nb(0)).to be >= 0 - expect(consumer.events_poll_nb(100)).to be >= 0 + assert_operator consumer.events_poll_nb(0), :>=, 0 + assert_operator consumer.events_poll_nb(100), :>=, 0 end it "processes events without releasing GVL" do consumer.subscribe(topic) consumer.poll(1_000) - expect(stats).to be_empty + assert_empty @stats # Wait for statistics to be ready sleep 0.6 # Non-blocking poll should also process stats events consumer.events_poll_nb(100) - expect(stats).not_to be_empty + refute_empty @stats end end describe "#consumer_group_metadata_pointer" do - let(:pointer) { consumer.consumer_group_metadata_pointer } - - after { Rdkafka::Bindings.rd_kafka_consumer_group_metadata_destroy(pointer) } - it "expect to return a pointer" do - expect(pointer).to be_a(FFI::Pointer) + pointer = consumer.consumer_group_metadata_pointer + begin + assert_kind_of FFI::Pointer, pointer + ensure + Rdkafka::Bindings.rd_kafka_consumer_group_metadata_destroy(pointer) + end end end describe "a rebalance listener" do - 
let(:consumer) do - config = rdkafka_consumer_config - config.consumer_rebalance_listener = listener - config.consumer - end - context "with a working listener" do - let(:listener) do - Struct.new(:queue) do + it "gets notifications" do + listener = Struct.new(:queue) do def on_partitions_assigned(list) collect(:assign, list) end @@ -1217,21 +1228,23 @@ def collect(name, list) queue << ([name] + partitions) end end.new([]) - end - it "gets notifications" do + config = rdkafka_consumer_config + config.consumer_rebalance_listener = listener + @consumer = config.consumer + notify_listener(listener, topic: topic) - expect(listener.queue).to eq([ + assert_equal [ [:assign, topic, 0, 1, 2], [:revoke, topic, 0, 1, 2] - ]) + ], listener.queue end end context "with a broken listener" do - let(:listener) do - Struct.new(:queue) do + it "handles callback exceptions" do + listener = Struct.new(:queue) do def on_partitions_assigned(list) queue << :assigned raise "boom" @@ -1242,12 +1255,14 @@ def on_partitions_revoked(list) raise "boom" end end.new([]) - end - it "handles callback exceptions" do + config = rdkafka_consumer_config + config.consumer_rebalance_listener = listener + @consumer = config.consumer + notify_listener(listener, topic: topic) - expect(listener.queue).to eq([:assigned, :revoked]) + assert_equal [:assigned, :revoked], listener.queue end end end @@ -1272,39 +1287,29 @@ def on_partitions_revoked(list) poll_nb: [] }.each do |method, args| it "raises an exception if #{method} is called" do - expect { + e = assert_raises(Rdkafka::ClosedConsumerError) do if args.nil? consumer.public_send(method) else consumer.public_send(method, *args) end - }.to raise_exception(Rdkafka::ClosedConsumerError, /#{method}/) + end + assert_match(/#{method}/, e.message) end end end it "provides a finalizer that closes the native kafka client" do - expect(consumer.closed?).to be(false) + assert_equal false, consumer.closed? consumer.finalizer.call("some-ignored-object-id") - expect(consumer.closed?).to be(true) + assert_equal true, consumer.closed? end context "when the rebalance protocol is cooperative" do - let(:consumer) do - config = rdkafka_consumer_config( - { - "partition.assignment.strategy": "cooperative-sticky", - debug: "consumer" - } - ) - config.consumer_rebalance_listener = listener - config.consumer - end - - let(:listener) do - Struct.new(:queue) do + it "is able to assign and unassign partitions using the cooperative partition assignment APIs" do + listener = Struct.new(:queue) do def on_partitions_assigned(list) collect(:assign, list) end @@ -1318,9 +1323,16 @@ def collect(name, list) queue << ([name] + partitions) end end.new([]) - end - it "is able to assign and unassign partitions using the cooperative partition assignment APIs" do + config = rdkafka_consumer_config( + { + "partition.assignment.strategy": "cooperative-sticky", + debug: "consumer" + } + ) + config.consumer_rebalance_listener = listener + @consumer = config.consumer + notify_listener(listener, topic: topic) do handles = [] 10.times do @@ -1337,15 +1349,15 @@ def collect(name, list) # Check the first 10 messages. Then close the consumer, which # should break the each loop. 
consumer.each_with_index do |message, i| - expect(message).to be_a Rdkafka::Consumer::Message + assert_kind_of Rdkafka::Consumer::Message, message break if i == 9 end end - expect(listener.queue).to eq([ + assert_equal [ [:assign, topic, 0, 1, 2], [:revoke, topic, 0, 1, 2] - ]) + ], listener.queue end end @@ -1357,36 +1369,36 @@ def collect(name, list) lifetime_ms: Time.now.to_i * 1000 + 900 * 1000, principal_name: "kafka-cluster" ) - expect(response).to eq(Rdkafka::Bindings::RD_KAFKA_RESP_ERR__STATE) + assert_equal Rdkafka::Bindings::RD_KAFKA_RESP_ERR__STATE, response end end context "when sasl configured" do before do - $consumer_sasl = rdkafka_producer_config( + @consumer_sasl = rdkafka_producer_config( "security.protocol": "sasl_ssl", "sasl.mechanisms": "OAUTHBEARER" ).consumer end after do - $consumer_sasl.close + @consumer_sasl.close end context "without extensions" do it "succeeds" do - response = $consumer_sasl.oauthbearer_set_token( + response = @consumer_sasl.oauthbearer_set_token( token: "foo", lifetime_ms: Time.now.to_i * 1000 + 900 * 1000, principal_name: "kafka-cluster" ) - expect(response).to eq(0) + assert_equal 0, response end end context "with extensions" do it "succeeds" do - response = $consumer_sasl.oauthbearer_set_token( + response = @consumer_sasl.oauthbearer_set_token( token: "foo", lifetime_ms: Time.now.to_i * 1000 + 900 * 1000, principal_name: "kafka-cluster", @@ -1394,16 +1406,16 @@ def collect(name, list) "foo" => "bar" } ) - expect(response).to eq(0) + assert_equal 0, response end end end end describe "when reaching eof on a topic and eof reporting enabled" do - let(:consumer) { rdkafka_consumer_config("enable.partition.eof": true).consumer } - it "returns proper details" do + @consumer = rdkafka_consumer_config("enable.partition.eof": true).consumer + (0..2).each do |i| producer.produce( topic: topic, @@ -1425,20 +1437,15 @@ def collect(name, list) break if eof_error end - expect(eof_error.code).to eq(:partition_eof) + assert_equal :partition_eof, eof_error.code end end describe "long running consumption" do - let(:consumer) { rdkafka_consumer_config.consumer } - let(:producer) { rdkafka_producer_config.producer } - - after { - consumer.close - producer.close - } - it "consumes messages continuously for 60 seconds" do + @consumer = rdkafka_consumer_config.consumer + @producer = rdkafka_producer_config.producer + consumer.subscribe(topic) wait_for_assignment(consumer) @@ -1464,8 +1471,8 @@ def collect(name, list) while Time.now - start_time < 60 message = consumer.poll(1000) if message - expect(message).to be_a Rdkafka::Consumer::Message - expect(message.topic).to eq(topic) + assert_kind_of Rdkafka::Consumer::Message, message + assert_equal topic, message.topic messages_consumed += 1 consumer.commit if messages_consumed % 10 == 0 end @@ -1473,47 +1480,50 @@ def collect(name, list) producer_thread.join - expect(messages_consumed).to be > 50 # Should consume most messages + assert_operator messages_consumed, :>, 50 # Should consume most messages end end describe "#events_poll_nb_each" do it "does not raise when queue is empty" do - expect { consumer.events_poll_nb_each { |_| } }.not_to raise_error + consumer.events_poll_nb_each { |_| } end it "yields the count after each poll" do counts = [] # Stub to return events, then zero call_count = 0 - allow(Rdkafka::Bindings).to receive(:rd_kafka_poll_nb) do + Rdkafka::Bindings.stubs(:rd_kafka_poll_nb).with do call_count += 1 - (call_count <= 2) ? 
1 : 0 - end + true + end.returns(1, 1, 0) consumer.events_poll_nb_each { |count| counts << count } - expect(counts).to eq([1, 1]) + assert_equal [1, 1], counts end it "stops when block returns :stop" do iterations = 0 # Stub to always return events - allow(Rdkafka::Bindings).to receive(:rd_kafka_poll_nb).and_return(1) + Rdkafka::Bindings.stubs(:rd_kafka_poll_nb).returns(1) consumer.events_poll_nb_each do |_count| iterations += 1 :stop if iterations >= 3 end - expect(iterations).to eq(3) + assert_equal 3, iterations end context "when consumer is closed" do before { consumer.close } it "raises ClosedConsumerError" do - expect { consumer.events_poll_nb_each { |_| } }.to raise_error(Rdkafka::ClosedConsumerError, /events_poll_nb_each/) + e = assert_raises(Rdkafka::ClosedConsumerError) do + consumer.events_poll_nb_each { |_| } + end + assert_match(/events_poll_nb_each/, e.message) end end end @@ -1526,7 +1536,7 @@ def collect(name, list) messages = [] consumer.poll_nb_each { |msg| messages << msg } - expect(messages).to be_a(Array) + assert_kind_of Array, messages end it "yields messages and respects :stop" do @@ -1556,7 +1566,7 @@ def collect(name, list) :stop if messages.size >= 1 end - expect(messages.size).to eq(1) + assert_equal 1, messages.size end it "properly cleans up message pointers" do @@ -1567,16 +1577,17 @@ def collect(name, list) sleep 2 # This should not leak memory - message_destroy is called in ensure - expect { - consumer.poll_nb_each { |_| } - }.not_to raise_error + consumer.poll_nb_each { |_| } end context "when consumer is closed" do before { consumer.close } it "raises ClosedConsumerError" do - expect { consumer.poll_nb_each { |_| } }.to raise_error(Rdkafka::ClosedConsumerError, /poll_nb_each/) + e = assert_raises(Rdkafka::ClosedConsumerError) do + consumer.poll_nb_each { |_| } + end + assert_match(/poll_nb_each/, e.message) end end end @@ -1586,7 +1597,7 @@ def collect(name, list) consumer.subscribe(topic) signal_r, signal_w = IO.pipe - expect { consumer.enable_queue_io_events(signal_w.fileno) }.not_to raise_error + consumer.enable_queue_io_events(signal_w.fileno) signal_r.close signal_w.close end @@ -1595,7 +1606,7 @@ def collect(name, list) consumer.subscribe(topic) signal_r, signal_w = IO.pipe - expect { consumer.enable_background_queue_io_events(signal_w.fileno) }.not_to raise_error + consumer.enable_background_queue_io_events(signal_w.fileno) signal_r.close signal_w.close end @@ -1605,7 +1616,7 @@ def collect(name, list) signal_r, signal_w = IO.pipe custom_payload = "hello" - expect { consumer.enable_queue_io_events(signal_w.fileno, custom_payload) }.not_to raise_error + consumer.enable_queue_io_events(signal_w.fileno, custom_payload) signal_r.close signal_w.close end @@ -1633,7 +1644,7 @@ def collect(name, list) end # We may or may not get messages depending on rebalancing, but should not error - expect(messages).to be_a(Array) + assert_kind_of Array, messages signal_r.close signal_w.close end @@ -1643,14 +1654,18 @@ def collect(name, list) it "raises ClosedInnerError when enabling queue_io_events" do signal_r, signal_w = IO.pipe - expect { consumer.enable_queue_io_events(signal_w.fileno) }.to raise_error(Rdkafka::ClosedInnerError) + assert_raises(Rdkafka::ClosedInnerError) do + consumer.enable_queue_io_events(signal_w.fileno) + end signal_r.close signal_w.close end it "raises ClosedInnerError when enabling background_queue_io_events" do signal_r, signal_w = IO.pipe - expect { consumer.enable_background_queue_io_events(signal_w.fileno) }.to 
raise_error(Rdkafka::ClosedInnerError) + assert_raises(Rdkafka::ClosedInnerError) do + consumer.enable_background_queue_io_events(signal_w.fileno) + end signal_r.close signal_w.close end diff --git a/test/lib/rdkafka/defaults_test.rb b/test/lib/rdkafka/defaults_test.rb new file mode 100644 index 00000000..01173f23 --- /dev/null +++ b/test/lib/rdkafka/defaults_test.rb @@ -0,0 +1,116 @@ +# frozen_string_literal: true + +require_relative "../../test_helper" + +describe Rdkafka::Defaults do + describe "consumer timeouts" do + it "defines CONSUMER_COMMITTED_TIMEOUT_MS as 2000" do + assert_equal 2_000, described_class::CONSUMER_COMMITTED_TIMEOUT_MS + end + + it "defines CONSUMER_QUERY_WATERMARK_TIMEOUT_MS as 1000" do + assert_equal 1_000, described_class::CONSUMER_QUERY_WATERMARK_TIMEOUT_MS + end + + it "defines CONSUMER_LAG_TIMEOUT_MS as 1000" do + assert_equal 1_000, described_class::CONSUMER_LAG_TIMEOUT_MS + end + + it "defines CONSUMER_OFFSETS_FOR_TIMES_TIMEOUT_MS as 1000" do + assert_equal 1_000, described_class::CONSUMER_OFFSETS_FOR_TIMES_TIMEOUT_MS + end + + it "defines CONSUMER_POLL_TIMEOUT_MS as 250" do + assert_equal 250, described_class::CONSUMER_POLL_TIMEOUT_MS + end + end + + describe "producer timeouts" do + it "defines PRODUCER_FLUSH_TIMEOUT_MS as 5000" do + assert_equal 5_000, described_class::PRODUCER_FLUSH_TIMEOUT_MS + end + + it "defines PRODUCER_PURGE_FLUSH_TIMEOUT_MS as 100" do + assert_equal 100, described_class::PRODUCER_PURGE_FLUSH_TIMEOUT_MS + end + end + + describe "metadata timeouts" do + it "defines METADATA_TIMEOUT_MS as 2000" do + assert_equal 2_000, described_class::METADATA_TIMEOUT_MS + end + end + + describe "handle timeouts" do + it "defines HANDLE_WAIT_TIMEOUT_MS as 60000" do + assert_equal 60_000, described_class::HANDLE_WAIT_TIMEOUT_MS + end + end + + describe "native kafka polling" do + it "defines NATIVE_KAFKA_POLL_TIMEOUT_MS as 100" do + assert_equal 100, described_class::NATIVE_KAFKA_POLL_TIMEOUT_MS + end + end + + describe "internal timing" do + it "defines PRODUCER_PURGE_SLEEP_INTERVAL_MS as 1" do + assert_equal 1, described_class::PRODUCER_PURGE_SLEEP_INTERVAL_MS + end + + it "defines NATIVE_KAFKA_SYNCHRONIZE_SLEEP_INTERVAL_MS as 10" do + assert_equal 10, described_class::NATIVE_KAFKA_SYNCHRONIZE_SLEEP_INTERVAL_MS + end + + it "defines METADATA_RETRY_BACKOFF_BASE_MS as 100" do + assert_equal 100, described_class::METADATA_RETRY_BACKOFF_BASE_MS + end + + it "defines METADATA_MAX_RETRIES as 10" do + assert_equal 10, described_class::METADATA_MAX_RETRIES + end + + it "defines CONSUMER_SEEK_TIMEOUT_MS as 0" do + assert_equal 0, described_class::CONSUMER_SEEK_TIMEOUT_MS + end + + it "defines CONSUMER_EVENTS_POLL_TIMEOUT_MS as 0" do + assert_equal 0, described_class::CONSUMER_EVENTS_POLL_TIMEOUT_MS + end + end + + describe "cache settings" do + it "defines PARTITIONS_COUNT_CACHE_TTL_MS as 30000" do + assert_equal 30_000, described_class::PARTITIONS_COUNT_CACHE_TTL_MS + end + end + + describe "constant definitions" do + it "defines all expected constants" do + constants = described_class.constants + refute_empty constants + expected_constants = %i[ + CONSUMER_COMMITTED_TIMEOUT_MS + CONSUMER_QUERY_WATERMARK_TIMEOUT_MS + CONSUMER_LAG_TIMEOUT_MS + CONSUMER_OFFSETS_FOR_TIMES_TIMEOUT_MS + CONSUMER_POLL_TIMEOUT_MS + PRODUCER_FLUSH_TIMEOUT_MS + PRODUCER_PURGE_FLUSH_TIMEOUT_MS + METADATA_TIMEOUT_MS + HANDLE_WAIT_TIMEOUT_MS + NATIVE_KAFKA_POLL_TIMEOUT_MS + PRODUCER_PURGE_SLEEP_INTERVAL_MS + NATIVE_KAFKA_SYNCHRONIZE_SLEEP_INTERVAL_MS + METADATA_RETRY_BACKOFF_BASE_MS + 
METADATA_MAX_RETRIES + CONSUMER_SEEK_TIMEOUT_MS + CONSUMER_EVENTS_POLL_TIMEOUT_MS + PARTITIONS_COUNT_CACHE_TTL_MS + ] + expected_constants.each do |const| + assert_includes constants, const + end + end + end +end diff --git a/test/lib/rdkafka/error_test.rb b/test/lib/rdkafka/error_test.rb new file mode 100644 index 00000000..047abec4 --- /dev/null +++ b/test/lib/rdkafka/error_test.rb @@ -0,0 +1,136 @@ +# frozen_string_literal: true + +require_relative "../../test_helper" + +describe Rdkafka::RdkafkaError do + it "raises a type error for a nil response" do + assert_raises(TypeError) do + described_class.new(nil) + end + end + + it "creates an error with a message prefix" do + assert_equal "message prefix", described_class.new(10, "message prefix").message_prefix + end + + it "creates an error with a broker message" do + assert_equal "broker message", described_class.new(10, broker_message: "broker message").broker_message + end + + it "creates an error with an instance name" do + assert_equal "rdkafka#producer-1", described_class.new(10, instance_name: "rdkafka#producer-1").instance_name + end + + it "defaults instance_name to nil" do + assert_nil described_class.new(10).instance_name + end + + describe "#code" do + it "handles an invalid response" do + assert_equal :err_933975?, described_class.new(933975).code + end + + it "returns error messages from rdkafka" do + assert_equal :msg_size_too_large, described_class.new(10).code + end + + it "strips a leading underscore" do + assert_equal :partition_eof, described_class.new(-191).code + end + end + + describe "#to_s" do + it "handles an invalid response" do + assert_equal "Err-933975? (err_933975?)", described_class.new(933975).to_s + end + + it "returns error messages from rdkafka" do + assert_equal "Broker: Message size too large (msg_size_too_large)", described_class.new(10).to_s + end + + it "adds the message prefix if present" do + assert_equal "Error explanation - Broker: Message size too large (msg_size_too_large)", described_class.new(10, "Error explanation").to_s + end + + it "adds the instance name if present" do + assert_equal "Broker: Message size too large (msg_size_too_large) [rdkafka#producer-1]", described_class.new(10, instance_name: "rdkafka#producer-1").to_s + end + + it "adds both message prefix and instance name if present" do + assert_equal "Error explanation - Broker: Message size too large (msg_size_too_large) [rdkafka#producer-1]", described_class.new(10, "Error explanation", instance_name: "rdkafka#producer-1").to_s + end + end + + describe "#message" do + it "handles an invalid response" do + assert_equal "Err-933975? (err_933975?)", described_class.new(933975).message + end + + it "returns error messages from rdkafka" do + assert_equal "Broker: Message size too large (msg_size_too_large)", described_class.new(10).message + end + + it "adds the message prefix if present" do + assert_equal "Error explanation - Broker: Message size too large (msg_size_too_large)", described_class.new(10, "Error explanation").message + end + + it "adds the instance name if present" do + assert_equal "Broker: Message size too large (msg_size_too_large) [rdkafka#producer-1]", described_class.new(10, instance_name: "rdkafka#producer-1").message + end + end + + describe "#is_partition_eof?" do + it "is false when not partition eof" do + assert_equal false, described_class.new(933975).is_partition_eof? + end + + it "is true when partition eof" do + assert_equal true, described_class.new(-191).is_partition_eof? 
+ end + end + + describe "#==" do + before do + @error = described_class.new(10, "Error explanation") + end + + it "equals another error with the same content" do + assert_equal described_class.new(10, "Error explanation"), @error + end + + it "does not equal another error with a different error code" do + refute_equal described_class.new(20, "Error explanation"), @error + end + + it "does not equal another error with a different message" do + refute_equal described_class.new(10, "Different error explanation"), @error + end + + it "does not equal another error with no message" do + refute_equal described_class.new(10), @error + end + + it "does not equal another error with a different instance name" do + error_a = described_class.new(10, instance_name: "rdkafka#producer-1") + error_b = described_class.new(10, instance_name: "rdkafka#producer-2") + refute_equal error_b, error_a + end + + it "equals another error with the same instance name" do + error_a = described_class.new(10, instance_name: "rdkafka#producer-1") + error_b = described_class.new(10, instance_name: "rdkafka#producer-1") + assert_equal error_b, error_a + end + end +end + +describe Rdkafka::LibraryLoadError do + it "is a subclass of BaseError" do + assert_kind_of Rdkafka::BaseError, described_class.new + end + + it "accepts a message" do + error = described_class.new("test message") + assert_equal "test message", error.message + end +end diff --git a/test/lib/rdkafka/metadata_test.rb b/test/lib/rdkafka/metadata_test.rb new file mode 100644 index 00000000..8da27a0a --- /dev/null +++ b/test/lib/rdkafka/metadata_test.rb @@ -0,0 +1,80 @@ +# frozen_string_literal: true + +require_relative "../../test_helper" +require "securerandom" + +describe Rdkafka::Metadata do + before do + @config = rdkafka_consumer_config + @native_config = @config.send(:native_config) + @native_kafka = @config.send(:native_kafka, @native_config, :rd_kafka_consumer) + end + + after do + Rdkafka::Bindings.rd_kafka_consumer_close(@native_kafka) + Rdkafka::Bindings.rd_kafka_destroy(@native_kafka) + end + + context "passing in a topic name" do + context "that is a non-existent topic" do + it "raises an appropriate exception" do + topic_name = SecureRandom.uuid.to_s + e = assert_raises(Rdkafka::RdkafkaError) do + described_class.new(@native_kafka, topic_name) + end + assert_match(/Broker: Unknown topic or partition \(unknown_topic_or_part\)/, e.message) + end + end + + context "that is one of our test topics" do + before do + @topic_name = TestTopics.create(partitions: 25) + @metadata = described_class.new(@native_kafka, @topic_name) + end + + it "#brokers returns our single broker" do + assert_equal 1, @metadata.brokers.length + assert_equal 1, @metadata.brokers[0][:broker_id] + assert_includes %w[127.0.0.1 localhost], @metadata.brokers[0][:broker_name] + assert_equal rdkafka_base_config[:"bootstrap.servers"].split(":").last.to_i, @metadata.brokers[0][:broker_port] + end + + it "#topics returns data on our test topic" do + assert_equal 1, @metadata.topics.length + assert_equal 25, @metadata.topics[0][:partition_count] + assert_equal 25, @metadata.topics[0][:partitions].length + assert_equal @topic_name, @metadata.topics[0][:topic_name] + end + end + end + + context "not passing in a topic name" do + it "#brokers returns our single broker" do + metadata = described_class.new(@native_kafka, nil) + assert_equal 1, metadata.brokers.length + assert_equal 1, metadata.brokers[0][:broker_id] + assert_includes %w[127.0.0.1 localhost], metadata.brokers[0][:broker_name] + 
assert_equal rdkafka_base_config[:"bootstrap.servers"].split(":").last.to_i, metadata.brokers[0][:broker_port] + end + + it "#topics returns data about existing topics" do + # Force topic creation before querying metadata + test_topic = TestTopics.create + metadata = described_class.new(@native_kafka, nil) + result = metadata.topics.map { |topic| topic[:topic_name] } + assert_includes result, test_topic + end + end + + context "when a non-zero error code is returned" do + it "creating the instance raises an exception" do + topic_name = SecureRandom.uuid.to_s + Rdkafka::Bindings.stubs(:rd_kafka_metadata).returns(-165) + + e = assert_raises(Rdkafka::RdkafkaError) do + described_class.new(@native_kafka, topic_name) + end + assert_match(/Local: Required feature not supported by broker \(unsupported_feature\)/, e.message) + end + end +end diff --git a/test/lib/rdkafka/native_kafka_test.rb b/test/lib/rdkafka/native_kafka_test.rb new file mode 100644 index 00000000..b76c3565 --- /dev/null +++ b/test/lib/rdkafka/native_kafka_test.rb @@ -0,0 +1,187 @@ +# frozen_string_literal: true + +require_relative "../../test_helper" + +describe Rdkafka::NativeKafka do + def setup + super + @config = rdkafka_producer_config + @native = @config.send(:native_kafka, @config.send(:native_config), :rd_kafka_producer) + @opaque = Rdkafka::Opaque.new + @thread = mock("thread") + @thread.stubs(:name=) + @thread.stubs(:[]=) + @thread.stubs(:join) + @thread.stubs(:abort_on_exception=) + Rdkafka::Bindings.stubs(:rd_kafka_name).returns("producer-1") + Thread.stubs(:new).returns(@thread) + end + + def new_client + described_class.new(@native, run_polling_thread: true, opaque: @opaque) + end + + def teardown + @client.close if @client && !@client.closed? + super + end + + context "defaults" do + it "sets the thread name" do + @thread.expects(:name=).with("rdkafka.native_kafka#producer-1") + @client = new_client + end + + it "sets the thread to abort on exception" do + @thread.expects(:abort_on_exception=).with(true) + @client = new_client + end + + it "sets the thread closing flag to false" do + @thread.expects(:[]=).with(:closing, false) + @client = new_client + end + end + + context "the polling thread" do + it "is created" do + Thread.expects(:new).returns(@thread) + @client = new_client + end + end + + it "exposes the inner client" do + @client = new_client + @client.with_inner do |inner| + assert_equal @native, inner + end + end + + context "when client was not yet closed (nil)" do + before do + @client = new_client + end + + it "is not closed" do + assert_equal false, @client.closed? + end + + context "and attempt to close" do + it "calls the destroy binding" do + Rdkafka::Bindings.expects(:rd_kafka_destroy).with(@native) + @client.close + end + + it "indicates to the polling thread that it is closing" do + @thread.expects(:[]=).with(:closing, true) + @client.close + end + + it "joins the polling thread" do + @thread.expects(:join) + @client.close + end + + it "closes and unassigns the native client" do + @client.close + assert_equal true, @client.closed? + end + end + end + + context "when client was already closed" do + before do + @client = new_client + @client.close + end + + it "is closed" do + assert_equal true, @client.closed?
+ end + end + + context "and attempt to close again" do + it "does not call the destroy_flags binding" do + Rdkafka::Bindings.expects(:rd_kafka_destroy_flags).never + @client.close + end + + it "does not indicate to the polling thread that it is closing" do + @thread.expects(:[]=).with(:closing, true).never + @client.close + end + + it "does not join the polling thread" do + @thread.expects(:join).never + @client.close + end + + it "does not close and unassign the native client again" do + @client.close + assert_equal true, @client.closed? + end + end + end + + it "provides a finalizer that closes the native kafka client" do + @client = new_client + assert_equal false, @client.closed? + + @client.finalizer.call("some-ignored-object-id") + + assert_equal true, @client.closed? + end +end + +# Separate describe block for FD API tests to avoid interference with mocked threading tests +describe Rdkafka::NativeKafka, "#enable_main_queue_io_events and #enable_background_queue_io_events" do + def setup + super + @config = rdkafka_producer_config + @native = @config.send(:native_kafka, @config.send(:native_config), :rd_kafka_producer) + @opaque = Rdkafka::Opaque.new + @client = described_class.new(@native, run_polling_thread: false, opaque: @opaque, auto_start: false) + end + + def teardown + @client.close unless @client.closed? + super + end + + it "allows IO events when polling thread is not active" do + signal_r, signal_w = IO.pipe + + @client.enable_main_queue_io_events(signal_w.fileno) + @client.enable_background_queue_io_events(signal_w.fileno) + + signal_r.close + signal_w.close + end + + it "accepts custom payload for IO events" do + signal_r, signal_w = IO.pipe + payload = "custom" + + @client.enable_main_queue_io_events(signal_w.fileno, payload) + + signal_r.close + signal_w.close + end + + context "when client is closed" do + before { @client.close } + + it "raises ClosedInnerError when enabling main_queue_io_events" do + signal_r, signal_w = IO.pipe + assert_raises(Rdkafka::ClosedInnerError) { @client.enable_main_queue_io_events(signal_w.fileno) } + signal_r.close + signal_w.close + end + + it "raises ClosedInnerError when enabling background_queue_io_events" do + signal_r, signal_w = IO.pipe + assert_raises(Rdkafka::ClosedInnerError) { @client.enable_background_queue_io_events(signal_w.fileno) } + signal_r.close + signal_w.close + end + end +end diff --git a/test/lib/rdkafka/producer/delivery_handle_test.rb b/test/lib/rdkafka/producer/delivery_handle_test.rb new file mode 100644 index 00000000..11504412 --- /dev/null +++ b/test/lib/rdkafka/producer/delivery_handle_test.rb @@ -0,0 +1,77 @@ +# frozen_string_literal: true + +require_relative "../../../test_helper" + +describe Rdkafka::Producer::DeliveryHandle do + describe "#wait" do + before do + @handle = described_class.new.tap do |handle| + handle[:pending] = true + handle[:response] = 0 + handle[:partition] = 2 + handle[:offset] = 100 + handle.topic = TestTopics.unique + end + end + + it "waits until the timeout and then raises an error" do + e = assert_raises(Rdkafka::Producer::DeliveryHandle::WaitTimeoutError) { + @handle.wait(max_wait_timeout_ms: 100) + } + assert_match(/delivery/, e.message) + end + + context "when not pending anymore and no error" do + before do + @handle[:pending] = false + end + + it "returns a delivery report" do + report = @handle.wait + + assert_equal 2, report.partition + assert_equal 100, report.offset + assert_equal @handle.topic, report.topic_name + end + + it "waits without a timeout" do + report =
@handle.wait(max_wait_timeout_ms: nil) + + assert_equal 2, report.partition + assert_equal 100, report.offset + assert_equal @handle.topic, report.topic_name + end + end + end + + describe "#create_result" do + before do + @response = 0 + @handle = described_class.new.tap do |handle| + handle[:pending] = false + handle[:response] = 0 + handle[:partition] = 2 + handle[:offset] = 100 + handle.topic = TestTopics.unique + end + end + + context "when response is 0" do + it "has nil error" do + report = @handle.create_result + assert_nil report.error + end + end + + context "when response is not 0" do + before do + @handle[:response] = 1 + end + + it "has the appropriate error" do + report = @handle.create_result + assert_equal Rdkafka::RdkafkaError.new(1), report.error + end + end + end +end diff --git a/test/lib/rdkafka/producer/delivery_report_test.rb b/test/lib/rdkafka/producer/delivery_report_test.rb new file mode 100644 index 00000000..b8f069b4 --- /dev/null +++ b/test/lib/rdkafka/producer/delivery_report_test.rb @@ -0,0 +1,30 @@ +# frozen_string_literal: true + +require_relative "../../../test_helper" + +describe Rdkafka::Producer::DeliveryReport do + before do + @topic_name = TestTopics.unique + @report = described_class.new(2, 100, @topic_name, -1) + end + + it "gets the partition" do + assert_equal 2, @report.partition + end + + it "gets the offset" do + assert_equal 100, @report.offset + end + + it "gets the topic_name" do + assert_equal @topic_name, @report.topic_name + end + + it "gets the same topic name under topic alias" do + assert_equal @topic_name, @report.topic + end + + it "gets the error" do + assert_equal(-1, @report.error) + end +end diff --git a/test/lib/rdkafka/producer/partitions_count_cache_test.rb b/test/lib/rdkafka/producer/partitions_count_cache_test.rb new file mode 100644 index 00000000..7be7663c --- /dev/null +++ b/test/lib/rdkafka/producer/partitions_count_cache_test.rb @@ -0,0 +1,415 @@ +# frozen_string_literal: true + +require_relative "../../../test_helper" + +describe Rdkafka::Producer::PartitionsCountCache do + before do + @default_ttl_ms = 1_000 # Reduced from 30000 to speed up tests + @custom_ttl_ms = 500 # Half the default TTL + @cache = described_class.new(ttl_ms: @default_ttl_ms) + @custom_ttl_cache = described_class.new(ttl_ms: @custom_ttl_ms) + @topic = TestTopics.unique + @topic2 = TestTopics.unique + @partition_count = 5 + @higher_partition_count = 10 + @lower_partition_count = 3 + @even_higher_partition_count = 15 + end + + describe "#initialize" do + it "creates a cache with default TTL when no TTL is specified" do + standard_cache = described_class.new + assert_kind_of described_class, standard_cache + end + + it "creates a cache with custom TTL when specified" do + assert_kind_of described_class, @custom_ttl_cache + end + + context "backwards compatibility with ttl (seconds)" do + it "works with old ttl parameter (emits deprecation warning to stderr)" do + # Note: Deprecation warning is emitted but not tested here due to stderr capture complexity + old_style_cache = described_class.new(1) # 1 second + assert_kind_of described_class, old_style_cache + end + + it "converts seconds to milliseconds correctly" do + old_style_cache = described_class.new(2) # 2 seconds = 2000ms + + # Set a value and verify the TTL behavior + old_style_cache.set(@topic, @partition_count) + + # Wait 1.5 seconds (should still be valid as TTL is 2 seconds) + sleep(1.5) + result = old_style_cache.get(@topic) { fail "Should not be called - cache should still be valid" } + 
assert_equal @partition_count, result + + # Wait another 0.7 seconds (total 2.2 seconds, should be expired) + sleep(0.7) + block_called = false + new_result = old_style_cache.get(@topic) do + block_called = true + @partition_count + 1 + end + assert_equal true, block_called + assert_equal @partition_count + 1, new_result + end + + it "accepts both ttl and ttl_ms parameters" do + cache_instance = described_class.new(1, ttl_ms: 1000) + assert_kind_of described_class, cache_instance + end + + it "uses ttl_ms when both parameters are provided" do + # ttl: 10 would be 10000ms, but ttl_ms: 500 should take precedence + both_params_cache = described_class.new(10, ttl_ms: 500) + + both_params_cache.set(@topic, @partition_count) + + # Wait 0.6 seconds (past 500ms TTL but not past 10 seconds) + sleep(0.6) + + # Should be expired because ttl_ms: 500 takes precedence + block_called = false + both_params_cache.get(@topic) do + block_called = true + @partition_count + 1 + end + + assert_equal true, block_called + end + end + end + + describe "#get" do + context "when cache is empty" do + it "yields to get the value and caches it" do + block_called = false + result = @cache.get(@topic) do + block_called = true + @partition_count + end + + assert_equal true, block_called + assert_equal @partition_count, result + + # Verify caching by checking if block is called again + second_block_called = false + second_result = @cache.get(@topic) do + second_block_called = true + @partition_count + 1 # Different value to ensure we get cached value + end + + assert_equal false, second_block_called + assert_equal @partition_count, second_result + end + end + + context "when cache has a value" do + before do + # Seed the cache with a value + @cache.get(@topic) { @partition_count } + end + + it "returns cached value without yielding if not expired" do + block_called = false + result = @cache.get(@topic) do + block_called = true + @partition_count + 1 # Different value to ensure we get cached one + end + + assert_equal false, block_called + assert_equal @partition_count, result + end + + it "yields to get new value when TTL has expired" do + # Wait for TTL to expire (convert ms to seconds) + sleep(@default_ttl_ms / 1000.0 + 0.1) + + block_called = false + new_count = @partition_count + 1 + result = @cache.get(@topic) do + block_called = true + new_count + end + + assert_equal true, block_called + assert_equal new_count, result + + # Verify the new value is cached + second_block_called = false + second_result = @cache.get(@topic) do + second_block_called = true + new_count + 1 # Different value again + end + + assert_equal false, second_block_called + assert_equal new_count, second_result + end + + it "respects a custom TTL" do + # Seed the custom TTL cache with a value + @custom_ttl_cache.get(@topic) { @partition_count } + + # Wait for custom TTL to expire but not default TTL (convert ms to seconds) + sleep(@custom_ttl_ms / 1000.0 + 0.1) + + # Custom TTL cache should refresh + custom_block_called = false + custom_result = @custom_ttl_cache.get(@topic) do + custom_block_called = true + @higher_partition_count + end + + assert_equal true, custom_block_called + assert_equal @higher_partition_count, custom_result + + # Default TTL cache should not refresh yet + default_block_called = false + default_result = @cache.get(@topic) do + default_block_called = true + @higher_partition_count + end + + assert_equal false, default_block_called + assert_equal @partition_count, default_result + end + end + + context "when new value is 
obtained" do + before do + # Seed the cache with initial value + @cache.get(@topic) { @partition_count } + end + + it "updates cache when new value is higher than cached value" do + # Wait for TTL to expire (convert ms to seconds) + sleep(@default_ttl_ms / 1000.0 + 0.1) + + # Get higher value + result = @cache.get(@topic) { @higher_partition_count } + assert_equal @higher_partition_count, result + + # Verify it was cached + second_result = @cache.get(@topic) { fail "Should not be called" } + assert_equal @higher_partition_count, second_result + end + + it "preserves higher cached value when new value is lower" do + # First update to higher value (convert ms to seconds) + sleep(@default_ttl_ms / 1000.0 + 0.1) + @cache.get(@topic) { @higher_partition_count } + + # Then try to update to lower value (convert ms to seconds) + sleep(@default_ttl_ms / 1000.0 + 0.1) + result = @cache.get(@topic) { @lower_partition_count } + + assert_equal @higher_partition_count, result + + # and subsequent gets should return the previously cached higher value + second_result = @cache.get(@topic) { fail "Should not be called" } + assert_equal @higher_partition_count, second_result + end + + it "handles multiple topics independently" do + # Set up both topics with different values + @cache.get(@topic) { @partition_count } + @cache.get(@topic2) { @higher_partition_count } + + # Wait for TTL to expire (convert ms to seconds) + sleep(@default_ttl_ms / 1000.0 + 0.1) + + # Update first topic + first_result = @cache.get(@topic) { @even_higher_partition_count } + assert_equal @even_higher_partition_count, first_result + + # Update second topic independently + second_updated = @higher_partition_count + 3 + second_result = @cache.get(@topic2) { second_updated } + assert_equal second_updated, second_result + + # Both topics should have their updated values + assert_equal @even_higher_partition_count, @cache.get(@topic) { fail "Should not be called" } + assert_equal second_updated, @cache.get(@topic2) { fail "Should not be called" } + end + end + end + + describe "#set" do + context "when cache is empty" do + it "adds a new entry to the cache" do + @cache.set(@topic, @partition_count) + + # Verify through get + result = @cache.get(@topic) { fail "Should not be called" } + assert_equal @partition_count, result + end + end + + context "when cache already has a value" do + before do + @cache.set(@topic, @partition_count) + end + + it "updates cache when new value is higher" do + @cache.set(@topic, @higher_partition_count) + + result = @cache.get(@topic) { fail "Should not be called" } + assert_equal @higher_partition_count, result + end + + it "keeps original value when new value is lower" do + @cache.set(@topic, @lower_partition_count) + + result = @cache.get(@topic) { fail "Should not be called" } + assert_equal @partition_count, result + end + + it "updates the timestamp even when keeping original value" do + # Set initial value + @cache.set(@topic, @partition_count) + + # Wait until close to TTL expiring (convert ms to seconds) + sleep(@default_ttl_ms / 1000.0 - 0.2) + + # Set lower value (should update timestamp but not value) + @cache.set(@topic, @lower_partition_count) + + # Wait a bit more, but still under the full TTL if timestamp was refreshed + sleep(0.3) + + # Should still be valid due to timestamp refresh + result = @cache.get(@topic) { fail "Should not be called" } + assert_equal @partition_count, result + end + end + + context "with concurrent access" do + it "correctly handles simultaneous updates to the same 
topic" do + # This test focuses on the final value after concurrent updates + threads = [] + + # Create 5 threads that all try to update the same topic with increasing values + 5.times do |i| + threads << Thread.new do + value = 10 + i # Start at 10 to ensure all are higher than initial value + @cache.set(@topic, value) + end + end + + # Wait for all threads to complete + threads.each(&:join) + + # The highest value (14) should be stored and accessible through get + result = @cache.get(@topic) { fail "Should not be called" } + assert_equal 14, result + end + end + end + + describe "TTL behavior" do + it "treats entries as expired when they exceed TTL" do + # Set initial value + @cache.get(@topic) { @partition_count } + + # Wait just under TTL (convert ms to seconds) + sleep(@default_ttl_ms / 1000.0 - 0.2) + + # Value should still be cached (block should not be called) + result = @cache.get(@topic) { fail "Should not be called when cache is valid" } + assert_equal @partition_count, result + + # Now wait to exceed TTL + sleep(0.3) # Total sleep is now default_ttl_ms / 1000.0 + 0.1 + + # Cache should be expired, block should be called + block_called = false + new_value = @partition_count + 3 + result = @cache.get(@topic) do + block_called = true + new_value + end + + assert_equal true, block_called + assert_equal new_value, result + end + end + + describe "comprehensive scenarios" do + it "handles a full lifecycle of cache operations" do + # 1. Initial cache miss, fetch and store + result1 = @cache.get(@topic) { @partition_count } + assert_equal @partition_count, result1 + + # 2. Cache hit + result2 = @cache.get(@topic) { fail "Should not be called" } + assert_equal @partition_count, result2 + + # 3. Attempt to set lower value + @cache.set(@topic, @lower_partition_count) + result3 = @cache.get(@topic) { fail "Should not be called" } + # Should still return the higher original value + assert_equal @partition_count, result3 + + # 4. Set higher value + @cache.set(@topic, @higher_partition_count) + result4 = @cache.get(@topic) { fail "Should not be called" } + assert_equal @higher_partition_count, result4 + + # 5. TTL expires, new value provided is lower (convert ms to seconds) + sleep(@default_ttl_ms / 1000.0 + 0.1) + result5 = @cache.get(@topic) { @lower_partition_count } + # This returns the highest value + assert_equal @higher_partition_count, result5 + + # 6. But subsequent get should return the higher cached value + result6 = @cache.get(@topic) { fail "Should not be called" } + assert_equal @higher_partition_count, result6 + + # 7. 
Set new highest value directly + even_higher = @higher_partition_count + 5 + @cache.set(@topic, even_higher) + result7 = @cache.get(@topic) { fail "Should not be called" } + assert_equal even_higher, result7 + end + + it "handles multiple topics with different TTLs correctly" do + # Set up initial values + @cache.get(@topic) { @partition_count } + @custom_ttl_cache.get(@topic) { @partition_count } + + # Wait past custom TTL but not default TTL (convert ms to seconds) + sleep(@custom_ttl_ms / 1000.0 + 0.1) + + # Default cache should NOT refresh (still within default TTL) + default_result = @cache.get(@topic) { fail "Should not be called for default cache" } + # Original value should be maintained + assert_equal @partition_count, default_result + + # Custom TTL cache SHOULD refresh (past custom TTL) + custom_cache_value = @partition_count + 8 + custom_block_called = false + custom_result = @custom_ttl_cache.get(@topic) do + custom_block_called = true + custom_cache_value + end + + assert_equal true, custom_block_called + assert_equal custom_cache_value, custom_result + + # Now wait past default TTL (convert ms to seconds) + sleep((@default_ttl_ms - @custom_ttl_ms) / 1000.0 + 0.1) + + # Now default cache should also refresh + default_block_called = false + new_default_value = @partition_count + 10 + new_default_result = @cache.get(@topic) do + default_block_called = true + new_default_value + end + + assert_equal true, default_block_called + assert_equal new_default_value, new_default_result + end + end +end diff --git a/spec/lib/rdkafka/producer_spec.rb b/test/lib/rdkafka/producer_test.rb similarity index 66% rename from spec/lib/rdkafka/producer_spec.rb rename to test/lib/rdkafka/producer_test.rb index c0226a9d..843b2dca 100644 --- a/spec/lib/rdkafka/producer_spec.rb +++ b/test/lib/rdkafka/producer_test.rb @@ -2,13 +2,28 @@ require "zlib" -RSpec.describe Rdkafka::Producer do - let(:producer) { rdkafka_producer_config.producer } - let(:all_partitioners) { %w[random consistent consistent_random murmur2 murmur2_random fnv1a fnv1a_random] } - let(:producer) { rdkafka_producer_config.producer } - let(:consumer) { rdkafka_consumer_config.consumer } - let(:topic) { TestTopics.create } - let(:topic_25) { TestTopics.create(partitions: 25) } +require_relative "../../test_helper" + +describe Rdkafka::Producer do + def producer + @producer ||= rdkafka_producer_config.producer + end + + def consumer + @consumer ||= rdkafka_consumer_config.consumer + end + + def topic + @topic ||= TestTopics.create + end + + def topic_25 + @topic_25 ||= TestTopics.create(partitions: 25) + end + + before do + @all_partitioners = %w[random consistent consistent_random murmur2 murmur2_random fnv1a fnv1a_random] + end after do # Registry should always end up being empty. 
@@ -19,56 +34,52 @@ sleep(0.05) end - expect(registry).to be_empty, registry.inspect + assert_empty registry, registry.inspect producer.close consumer.close end describe "producer without auto-start" do - let(:producer) { rdkafka_producer_config.producer(native_kafka_auto_start: false) } - it "expect to be able to start it later and close" do + @producer = rdkafka_producer_config.producer(native_kafka_auto_start: false) producer.start producer.close end it "expect to be able to close it without starting" do + @producer = rdkafka_producer_config.producer(native_kafka_auto_start: false) producer.close end end describe "#name" do - it { expect(producer.name).to include("rdkafka#producer-") } + it "includes rdkafka#producer-" do + assert_includes producer.name, "rdkafka#producer-" + end end describe "#produce with topic config alterations" do context "when config is not valid" do it "expect to raise error" do - expect do + assert_raises(Rdkafka::Config::ConfigError) do producer.produce(topic: "test", payload: "", topic_config: { invalid: "invalid" }) - end.to raise_error(Rdkafka::Config::ConfigError) + end end end context "when config is valid" do - it "expect to raise error" do - expect do - producer.produce(topic: "test", payload: "", topic_config: { acks: 1 }).wait - end.not_to raise_error + it "expect not to raise error" do + producer.produce(topic: "test", payload: "", topic_config: { acks: 1 }).wait end context "when alteration should change behavior" do - # This is set incorrectly for a reason - # If alteration would not work, this will hang the spec suite - let(:producer) do - rdkafka_producer_config( + it "expect to give up on delivery fast based on alteration config" do + @producer = rdkafka_producer_config( "message.timeout.ms": 1_000_000, "bootstrap.servers": "127.0.0.1:9094" ).producer - end - it "expect to give up on delivery fast based on alteration config" do - expect do + e = assert_raises(Rdkafka::RdkafkaError) do producer.produce( topic: "produce_config_test", payload: "test", @@ -77,7 +88,8 @@ "message.timeout.ms": 1 } ).wait - end.to raise_error(Rdkafka::RdkafkaError, /msg_timed_out/) + end + assert_match(/msg_timed_out/, e.message) end end end @@ -86,22 +98,20 @@ context "delivery callback" do context "with a proc/lambda" do it "sets the callback" do - expect { - producer.delivery_callback = lambda do |delivery_handle| - end - }.not_to raise_error - expect(producer.delivery_callback).to respond_to :call + producer.delivery_callback = lambda do |delivery_handle| + end + assert_respond_to producer.delivery_callback, :call end it "calls the callback when a message is delivered" do @callback_called = false producer.delivery_callback = lambda do |report| - expect(report).not_to be_nil - expect(report.label).to eq "label" - expect(report.partition).to eq 1 - expect(report.offset).to be >= 0 - expect(report.topic_name).to eq topic + refute_nil report + assert_equal "label", report.label + assert_equal 1, report.partition + assert_operator report.offset, :>=, 0 + assert_equal topic, report.topic_name @callback_called = true end @@ -113,7 +123,7 @@ label: "label" ) - expect(handle.label).to eq "label" + assert_equal "label", handle.label # Wait for it to be delivered handle.wait(max_wait_timeout_ms: 15_000) @@ -122,7 +132,7 @@ producer.close # Callback should have been called - expect(@callback_called).to be true + assert_equal true, @callback_called end it "provides handle" do @@ -143,7 +153,7 @@ # Join the producer thread. 
producer.close - expect(handle).to be @callback_handle + assert_same handle, @callback_handle end end @@ -153,10 +163,8 @@ def call(stats) end end - expect { - producer.delivery_callback = callback.new - }.not_to raise_error - expect(producer.delivery_callback).to respond_to :call + producer.delivery_callback = callback.new + assert_respond_to producer.delivery_callback, :call end it "calls the callback when a message is delivered" do @@ -186,10 +194,10 @@ def call(report) producer.close # Callback should have been called - expect(called_report.first).not_to be_nil - expect(called_report.first.partition).to eq 1 - expect(called_report.first.offset).to be >= 0 - expect(called_report.first.topic_name).to eq topic + refute_nil called_report.first + assert_equal 1, called_report.first.partition + assert_operator called_report.first.offset, :>=, 0 + assert_equal topic, called_report.first.topic_name end it "provides handle" do @@ -219,24 +227,25 @@ def call(_, handle) producer.close # Callback should have been called - expect(handle).to be callback_handles.first + assert_same handle, callback_handles.first end end it "does not accept a callback that's not callable" do - expect { + assert_raises(TypeError) do producer.delivery_callback = "a string" - }.to raise_error(TypeError) + end end end it "requires a topic" do - expect { + e = assert_raises(ArgumentError) do producer.produce( payload: "payload", key: "key" ) - }.to raise_error ArgumentError, /missing keyword: :?topic/ + end + assert_match(/missing keyword: :?topic/, e.message) end it "produces a message" do @@ -249,16 +258,16 @@ def call(_, handle) ) # Should be pending at first - expect(handle.pending?).to be true - expect(handle.label).to eq "label" + assert_equal true, handle.pending? + assert_equal "label", handle.label # Check delivery handle and report report = handle.wait(max_wait_timeout_ms: 5_000) - expect(handle.pending?).to be false - expect(report).not_to be_nil - expect(report.partition).to eq 1 - expect(report.offset).to be >= 0 - expect(report.label).to eq "label" + assert_equal false, handle.pending? 
+ refute_nil report + assert_equal 1, report.partition + assert_operator report.offset, :>=, 0 + assert_equal "label", report.label # Flush and close producer producer.flush @@ -270,10 +279,10 @@ def call(_, handle) delivery_report: report, consumer: consumer ) - expect(message.partition).to eq 1 - expect(message.payload).to eq "payload" - expect(message.key).to eq "key" - expect(message.timestamp).to be_within(10).of(Time.now) + assert_equal 1, message.partition + assert_equal "payload", message.payload + assert_equal "key", message.key + assert_in_delta Time.now, message.timestamp, 10 end it "produces a message with a specified partition" do @@ -292,8 +301,8 @@ def call(_, handle) delivery_report: report, consumer: consumer ) - expect(message.partition).to eq 1 - expect(message.key).to eq "key partition" + assert_equal 1, message.partition + assert_equal "key partition", message.key end it "produces a message to the same partition with a similar partition key" do @@ -323,11 +332,11 @@ def call(_, handle) ) end - expect(messages[0].partition).not_to eq(messages[2].partition) - expect(messages[1].partition).to eq(messages[2].partition) - expect(messages[0].key).to eq key - expect(messages[1].key).to be_nil - expect(messages[2].key).to eq key + refute_equal messages[0].partition, messages[2].partition + assert_equal messages[1].partition, messages[2].partition + assert_equal key, messages[0].key + assert_nil messages[1].key + assert_equal key, messages[2].key end it "produces a message with empty string without crashing" do @@ -348,8 +357,8 @@ def call(_, handle) ) end - expect(messages[0].partition).to be >= 0 - expect(messages[0].key).to eq "a" + assert_operator messages[0].partition, :>=, 0 + assert_equal "a", messages[0].key end it "produces a message with utf-8 encoding" do @@ -367,9 +376,9 @@ def call(_, handle) consumer: consumer ) - expect(message.partition).to eq 1 - expect(message.payload.force_encoding("utf-8")).to eq "Τη γλώσσα μου έδωσαν ελληνική" - expect(message.key).to eq "key utf8" + assert_equal 1, message.partition + assert_equal "Τη γλώσσα μου έδωσαν ελληνική", message.payload.force_encoding("utf-8") + assert_equal "key utf8", message.key end it "produces a message to a non-existing topic with key and partition key" do @@ -385,16 +394,16 @@ def call(_, handle) ) # Should be pending at first - expect(handle.pending?).to be true - expect(handle.label).to eq "label" + assert_equal true, handle.pending? + assert_equal "label", handle.label # Check delivery handle and report report = handle.wait(max_wait_timeout_ms: 5_000) - expect(handle.pending?).to be false - expect(report).not_to be_nil - expect(report.partition).to eq 0 - expect(report.offset).to be >= 0 - expect(report.label).to eq "label" + assert_equal false, handle.pending? 
+ refute_nil report + assert_equal 0, report.partition + assert_operator report.offset, :>=, 0 + assert_equal "label", report.label # Flush and close producer producer.flush @@ -406,22 +415,22 @@ def call(_, handle) delivery_report: report, consumer: consumer ) - expect(message.partition).to eq 0 - expect(message.payload).to eq "payload" - expect(message.key).to eq "key" - expect(message.timestamp).to be_within(10).of(Time.now) + assert_equal 0, message.partition + assert_equal "payload", message.payload + assert_equal "key", message.key + assert_in_delta Time.now, message.timestamp, 10 end context "timestamp" do it "raises a type error if not nil, integer or time" do - expect { + assert_raises(TypeError) do producer.produce( topic: topic, payload: "payload timestamp", key: "key timestamp", timestamp: "10101010" ) - }.to raise_error TypeError + end end it "produces a message with an integer timestamp" do @@ -439,9 +448,9 @@ def call(_, handle) delivery_report: report ) - expect(message.partition).to eq 2 - expect(message.key).to eq "key timestamp" - expect(message.timestamp).to eq Time.at(1505069646, 252_000) + assert_equal 2, message.partition + assert_equal "key timestamp", message.key + assert_equal Time.at(1505069646, 252_000), message.timestamp end it "produces a message with a time timestamp" do @@ -459,9 +468,9 @@ def call(_, handle) delivery_report: report ) - expect(message.partition).to eq 2 - expect(message.key).to eq "key timestamp" - expect(message.timestamp).to eq Time.at(1505069646, 353_000) + assert_equal 2, message.partition + assert_equal "key timestamp", message.key + assert_equal Time.at(1505069646, 353_000), message.timestamp end end @@ -479,8 +488,8 @@ def call(_, handle) consumer: consumer ) - expect(message.key).to be_nil - expect(message.payload).to eq "payload no key" + assert_nil message.key + assert_equal "payload no key", message.payload end it "produces a message with nil payload" do @@ -497,8 +506,8 @@ def call(_, handle) consumer: consumer ) - expect(message.key).to eq "key no payload" - expect(message.payload).to be_nil + assert_equal "key no payload", message.key + assert_nil message.payload end it "produces a message with headers" do @@ -517,11 +526,11 @@ def call(_, handle) consumer: consumer ) - expect(message.payload).to eq "payload headers" - expect(message.key).to eq "key headers" - expect(message.headers["foo"]).to eq "bar" - expect(message.headers["baz"]).to eq "foobar" - expect(message.headers["foobar"]).to be_nil + assert_equal "payload headers", message.payload + assert_equal "key headers", message.key + assert_equal "bar", message.headers["foo"] + assert_equal "foobar", message.headers["baz"] + assert_nil message.headers["foobar"] end it "produces a message with empty headers" do @@ -540,9 +549,9 @@ def call(_, handle) consumer: consumer ) - expect(message.payload).to eq "payload headers" - expect(message.key).to eq "key headers" - expect(message.headers).to be_empty + assert_equal "payload headers", message.payload + assert_equal "key headers", message.key + assert_empty message.headers end it "produces message that aren't waited for and not crash" do @@ -566,7 +575,8 @@ def call(_, handle) end end - it "produces a message in a forked process", skip: defined?(JRUBY_VERSION) && "Kernel#fork is not available" do + it "produces a message in a forked process" do + skip "Kernel#fork is not available" if defined?(JRUBY_VERSION) # Fork, produce a message, send the report over a pipe and # wait for and check the message in the main process. 
# Force topic creation before forking @@ -577,9 +587,9 @@ def call(_, handle) reader.close # Avoid sharing the client between processes. - producer = rdkafka_producer_config.producer + fork_producer = rdkafka_producer_config.producer - handle = producer.produce( + handle = fork_producer.produce( topic: fork_topic, payload: "payload-forked", key: "key-forked" @@ -595,8 +605,8 @@ def call(_, handle) writer.write(report_json) writer.close - producer.flush - producer.close + fork_producer.flush + fork_producer.close end Process.wait(pid) @@ -616,20 +626,20 @@ def call(_, handle) delivery_report: report, consumer: consumer ) - expect(message.partition).to eq 0 - expect(message.payload).to eq "payload-forked" - expect(message.key).to eq "key-forked" + assert_equal 0, message.partition + assert_equal "payload-forked", message.payload + assert_equal "key-forked", message.key end it "raises an error when producing fails" do - expect(Rdkafka::Bindings).to receive(:rd_kafka_producev).and_return(20) + Rdkafka::Bindings.expects(:rd_kafka_producev).returns(20) - expect { + assert_raises(Rdkafka::RdkafkaError) do producer.produce( topic: topic, key: "key error" ) - }.to raise_error Rdkafka::RdkafkaError + end end it "raises a timeout error when waiting too long" do @@ -642,9 +652,9 @@ def call(_, handle) # With a warmed-up broker connection the message may already be delivered # before we get to call wait, so only assert timeout if still pending if handle[:pending] - expect { + assert_raises(Rdkafka::Producer::DeliveryHandle::WaitTimeoutError) do handle.wait(max_wait_timeout_ms: 0) - }.to raise_error Rdkafka::Producer::DeliveryHandle::WaitTimeoutError + end end # Waiting with a real timeout should always work @@ -665,7 +675,7 @@ def call(_, handle) events_poll_nb_each: :no_args }.each do |method, args| it "raises an exception if #{method} is called" do - expect { + e = assert_raises(Rdkafka::ClosedProducerError) do if args == :no_args producer.public_send(method) elsif args.is_a?(Hash) @@ -673,111 +683,119 @@ def call(_, handle) else producer.public_send(method, args) end - }.to raise_exception(Rdkafka::ClosedProducerError, /#{method}/) + end + assert_match(/#{method}/, e.message) end end end context "when not being able to deliver the message" do - let(:producer) do - rdkafka_producer_config( + it "contains the error in the response when not deliverable" do + @producer = rdkafka_producer_config( "bootstrap.servers": "127.0.0.1:9095", "message.timeout.ms": 100 ).producer - end - it "contains the error in the response when not deliverable" do handler = producer.produce(topic: topic, payload: nil, label: "na") # Wait for the async callbacks and delivery registry to update sleep(2) - expect(handler.create_result.error).to be_a(Rdkafka::RdkafkaError) - expect(handler.create_result.label).to eq("na") + assert_kind_of Rdkafka::RdkafkaError, handler.create_result.error + assert_equal "na", handler.create_result.label end end context "when topic does not exist and allow.auto.create.topics is false" do - let(:producer) do - rdkafka_producer_config( + it "contains the error in the response when not deliverable" do + @producer = rdkafka_producer_config( "bootstrap.servers": "127.0.0.1:9092", "message.timeout.ms": 100, "allow.auto.create.topics": false ).producer - end - it "contains the error in the response when not deliverable" do handler = producer.produce(topic: "it-#{SecureRandom.uuid}", payload: nil, label: "na") # Wait for the async callbacks and delivery registry to update sleep(2) - 
expect(handler.create_result.error).to be_a(Rdkafka::RdkafkaError) - expect(handler.create_result.error.code).to eq(:msg_timed_out) - expect(handler.create_result.label).to eq("na") + assert_kind_of Rdkafka::RdkafkaError, handler.create_result.error + assert_equal :msg_timed_out, handler.create_result.error.code + assert_equal "na", handler.create_result.label end end describe "#partition_count" do - it { expect(producer.partition_count(TestTopics.example_topic)).to eq(1) } + it "returns partition count" do + assert_equal 1, producer.partition_count(TestTopics.example_topic) + end context "when the partition count value is already cached" do before do producer.partition_count(TestTopics.example_topic) - allow(::Rdkafka::Metadata).to receive(:new).and_call_original end it "expect not to query it again" do + ::Rdkafka::Metadata.expects(:new).never producer.partition_count(TestTopics.example_topic) - expect(::Rdkafka::Metadata).not_to have_received(:new) end end context "when the partition count value was cached but time expired" do before do ::Rdkafka::Producer.partitions_count_cache = Rdkafka::Producer::PartitionsCountCache.new - allow(::Rdkafka::Metadata).to receive(:new).and_call_original end it "expect to query it again" do + ::Rdkafka::Metadata.expects(:new).returns(mock("metadata", topics: [{ partition_count: 1 }])) producer.partition_count(TestTopics.example_topic) - expect(::Rdkafka::Metadata).to have_received(:new) end end context "when the partition count value was cached and time did not expire" do before do - allow(::Process).to receive(:clock_gettime).and_return(0, 29.001) + ::Process.stubs(:clock_gettime).returns(0, 29.001) producer.partition_count(TestTopics.example_topic) - allow(::Rdkafka::Metadata).to receive(:new).and_call_original end it "expect not to query it again" do + ::Rdkafka::Metadata.expects(:new).never producer.partition_count(TestTopics.example_topic) - expect(::Rdkafka::Metadata).not_to have_received(:new) end end end describe "metadata fetch request recovery" do - let(:partition_count) { producer.partition_count(TestTopics.example_topic) } - describe "metadata initialization recovery" do context "when all good" do - it { expect(partition_count).to eq(1) } + it "returns partition count" do + assert_equal 1, producer.partition_count(TestTopics.example_topic) + end end context "when we fail for the first time with handled error" do - before do - raised = false - - allow(Rdkafka::Bindings).to receive(:rd_kafka_metadata).and_wrap_original do |m, *args| - if raised - m.call(*args) - else - raised = true + it "returns partition count" do + original = Rdkafka::Bindings.method(:rd_kafka_metadata) + call_count = 0 + meta = Rdkafka::Bindings.singleton_class + + verbose_was, $VERBOSE = $VERBOSE, nil + meta.send(:remove_method, :rd_kafka_metadata) + meta.send(:define_method, :rd_kafka_metadata) do |*args| + call_count += 1 + if call_count == 1 -185 + else + original.call(*args) end end + $VERBOSE = verbose_was + + begin + assert_equal 1, producer.partition_count(TestTopics.example_topic) + ensure + verbose_was, $VERBOSE = $VERBOSE, nil + meta.send(:remove_method, :rd_kafka_metadata) + meta.send(:define_method, :rd_kafka_metadata, original) + $VERBOSE = verbose_was + end end - - it { expect(partition_count).to eq(1) } end end end @@ -791,17 +809,10 @@ def call(_, handle) headers: {} ) - expect(producer.flush(5_000)).to be(true) + assert_equal true, producer.flush(5_000) end context "when it cannot flush due to a timeout" do - let(:producer) do - rdkafka_producer_config( - 
"bootstrap.servers": "127.0.0.1:9095", - "message.timeout.ms": 2_000 - ).producer - end - after do # Allow rdkafka to evict message preventing memory-leak # We give it a bit more time as on slow CIs things take time @@ -809,6 +820,11 @@ def call(_, handle) end it "returns false on flush when cannot deliver and beyond timeout" do + @producer = rdkafka_producer_config( + "bootstrap.servers": "127.0.0.1:9095", + "message.timeout.ms": 2_000 + ).producer + producer.produce( topic: topic, payload: "payload headers", @@ -816,79 +832,94 @@ def call(_, handle) headers: {} ) - expect(producer.flush(1_000)).to be(false) + assert_equal false, producer.flush(1_000) end end context "when there is a different error" do - before { allow(Rdkafka::Bindings).to receive(:rd_kafka_flush).and_return(-199) } + before { Rdkafka::Bindings.stubs(:rd_kafka_flush).returns(-199) } it "raises it" do - expect { producer.flush }.to raise_error(Rdkafka::RdkafkaError) + assert_raises(Rdkafka::RdkafkaError) do + producer.flush + end end end end describe "#purge" do context "when no outgoing messages" do - it { expect(producer.purge).to be(true) } + it "returns true" do + assert_equal true, producer.purge + end end context "when librdkafka purge returns an error" do - before { expect(Rdkafka::Bindings).to receive(:rd_kafka_purge).and_return(-153) } + before { Rdkafka::Bindings.expects(:rd_kafka_purge).returns(-153) } it "expect to raise an error" do - expect { producer.purge }.to raise_error(Rdkafka::RdkafkaError, /retry/) + e = assert_raises(Rdkafka::RdkafkaError) do + producer.purge + end + assert_match(/retry/, e.message) end end context "when there are outgoing things in the queue" do - let(:producer) do - rdkafka_producer_config( + it "shoulds purge and move forward" do + @producer = rdkafka_producer_config( "bootstrap.servers": "127.0.0.1:9095", "message.timeout.ms": 2_000 ).producer - end - it "shoulds purge and move forward" do producer.produce( topic: topic, payload: "payload headers" ) - expect(producer.purge).to be(true) - expect(producer.flush(1_000)).to be(true) + assert_equal true, producer.purge + assert_equal true, producer.flush(1_000) end it "materializes the delivery handles" do + @producer = rdkafka_producer_config( + "bootstrap.servers": "127.0.0.1:9095", + "message.timeout.ms": 2_000 + ).producer + handle = producer.produce( topic: topic, payload: "payload headers" ) - expect(producer.purge).to be(true) + assert_equal true, producer.purge - expect { handle.wait }.to raise_error(Rdkafka::RdkafkaError, /purge_queue/) + e = assert_raises(Rdkafka::RdkafkaError) do + handle.wait + end + assert_match(/purge_queue/, e.message) end context "when using delivery_callback" do - let(:delivery_reports) { [] } + it "runs the callback" do + @producer = rdkafka_producer_config( + "bootstrap.servers": "127.0.0.1:9095", + "message.timeout.ms": 2_000 + ).producer - let(:delivery_callback) do - ->(delivery_report) { delivery_reports << delivery_report } - end + delivery_reports = [] + delivery_callback = ->(delivery_report) { delivery_reports << delivery_report } - before { producer.delivery_callback = delivery_callback } + producer.delivery_callback = delivery_callback - it "runs the callback" do producer.produce( topic: topic, payload: "payload headers" ) - expect(producer.purge).to be(true) + assert_equal true, producer.purge # queue purge - expect(delivery_reports[0].error).to eq(-152) + assert_equal(-152, delivery_reports[0].error) end end end @@ -896,7 +927,7 @@ def call(_, handle) describe "#queue_size" do it 
"returns 0 when there are no pending messages" do - expect(producer.queue_size).to eq(0) + assert_equal 0, producer.queue_size end it "returns a positive number when there are pending messages" do @@ -918,7 +949,7 @@ def call(_, handle) sleep(0.1) queue_size = slow_producer.queue_size - expect(queue_size).to be > 0 + assert_operator queue_size, :>, 0 ensure slow_producer.close end @@ -932,16 +963,16 @@ def call(_, handle) producer.flush(5_000) - expect(producer.queue_size).to eq(0) + assert_equal 0, producer.queue_size end describe "#queue_length alias" do it "is an alias for queue_size" do - expect(producer.method(:queue_length)).to eq(producer.method(:queue_size)) + assert_equal producer.method(:queue_length), producer.method(:queue_size) end it "returns the same value as queue_size" do - expect(producer.queue_length).to eq(producer.queue_size) + assert_equal producer.queue_length, producer.queue_size end end end @@ -954,36 +985,36 @@ def call(_, handle) lifetime_ms: Time.now.to_i * 1000 + 900 * 1000, principal_name: "kafka-cluster" ) - expect(response).to eq(Rdkafka::Bindings::RD_KAFKA_RESP_ERR__STATE) + assert_equal Rdkafka::Bindings::RD_KAFKA_RESP_ERR__STATE, response end end context "when sasl configured" do before do - $producer_sasl = rdkafka_producer_config( + @producer_sasl = rdkafka_producer_config( "security.protocol": "sasl_ssl", "sasl.mechanisms": "OAUTHBEARER" ).producer end after do - $producer_sasl.close + @producer_sasl.close end context "without extensions" do it "succeeds" do - response = $producer_sasl.oauthbearer_set_token( + response = @producer_sasl.oauthbearer_set_token( token: "foo", lifetime_ms: Time.now.to_i * 1000 + 900 * 1000, principal_name: "kafka-cluster" ) - expect(response).to eq(0) + assert_equal 0, response end end context "with extensions" do it "succeeds" do - response = $producer_sasl.oauthbearer_set_token( + response = @producer_sasl.oauthbearer_set_token( token: "foo", lifetime_ms: Time.now.to_i * 1000 + 900 * 1000, principal_name: "kafka-cluster", @@ -991,7 +1022,7 @@ def call(_, handle) "foo" => "bar" } ) - expect(response).to eq(0) + assert_equal 0, response end end end @@ -1011,10 +1042,10 @@ def call(_, handle) ).wait message = wait_for_message(topic: topic, consumer: consumer, delivery_report: report) - expect(message).to be - expect(message.key).to eq("key headers") - expect(message.headers["type"]).to eq("String") - expect(message.headers["version"]).to eq(["2.1.3", "2.1.4"]) + refute_nil message + assert_equal "key headers", message.key + assert_equal "String", message.headers["type"] + assert_equal ["2.1.3", "2.1.4"], message.headers["version"] end it "produces a message with single value headers" do @@ -1030,124 +1061,106 @@ def call(_, handle) ).wait message = wait_for_message(topic: topic, consumer: consumer, delivery_report: report) - expect(message).to be - expect(message.key).to eq("key headers") - expect(message.headers["type"]).to eq("String") - expect(message.headers["version"]).to eq("2.1.3") + refute_nil message + assert_equal "key headers", message.key + assert_equal "String", message.headers["type"] + assert_equal "2.1.3", message.headers["version"] end end describe "with active statistics callback" do - let(:producer) do - rdkafka_producer_config("statistics.interval.ms": 1_000).producer - end + it "expect to update ttl on the partitions count cache via statistics" do + @producer = rdkafka_producer_config("statistics.interval.ms": 1_000).producer + Rdkafka::Config.statistics_callback = ->(*) {} - let(:count_cache_hash) { 
described_class.partitions_count_cache.to_h } - let(:pre_statistics_ttl) { count_cache_hash.fetch(topic, [])[0] } - let(:post_statistics_ttl) { count_cache_hash.fetch(topic, [])[0] } - - context "when using partition key" do - before do - Rdkafka::Config.statistics_callback = ->(*) {} - - # This call will make a blocking request to the metadata cache - producer.produce( - topic: topic, - payload: "payload headers", - partition_key: "test" - ).wait + # This call will make a blocking request to the metadata cache + producer.produce( + topic: topic, + payload: "payload headers", + partition_key: "test" + ).wait - pre_statistics_ttl + count_cache_hash = described_class.partitions_count_cache.to_h + pre_statistics_ttl = count_cache_hash.fetch(topic, [])[0] - # We wait to make sure that statistics are triggered and that there is a refresh - sleep(1.5) + # We wait to make sure that statistics are triggered and that there is a refresh + sleep(1.5) - post_statistics_ttl - end + count_cache_hash = described_class.partitions_count_cache.to_h + post_statistics_ttl = count_cache_hash.fetch(topic, [])[0] - it "expect to update ttl on the partitions count cache via statistics" do - expect(pre_statistics_ttl).to be < post_statistics_ttl - end + assert_operator pre_statistics_ttl, :<, post_statistics_ttl end - context "when not using partition key" do - before do - Rdkafka::Config.statistics_callback = ->(*) {} + it "expect not to update ttl on the partitions count cache via blocking but via use stats" do + @producer = rdkafka_producer_config("statistics.interval.ms": 1_000).producer + Rdkafka::Config.statistics_callback = ->(*) {} - # This call will make a blocking request to the metadata cache - producer.produce( - topic: topic, - payload: "payload headers" - ).wait + # This call will make a blocking request to the metadata cache + producer.produce( + topic: topic, + payload: "payload headers" + ).wait - pre_statistics_ttl + count_cache_hash = described_class.partitions_count_cache.to_h + pre_statistics_ttl = count_cache_hash.fetch(topic, [])[0] - # We wait to make sure that statistics are triggered and that there is a refresh - sleep(1.5) + # We wait to make sure that statistics are triggered and that there is a refresh + sleep(1.5) - # This will anyhow be populated from statistic - post_statistics_ttl - end + # This will anyhow be populated from statistic + count_cache_hash = described_class.partitions_count_cache.to_h + post_statistics_ttl = count_cache_hash.fetch(topic, [])[0] - it "expect not to update ttl on the partitions count cache via blocking but via use stats" do - expect(pre_statistics_ttl).to be_nil - expect(post_statistics_ttl).not_to be_nil - end + assert_nil pre_statistics_ttl + refute_nil post_statistics_ttl end end describe "without active statistics callback" do - let(:producer) do - rdkafka_producer_config("statistics.interval.ms": 1_000).producer - end - - let(:count_cache_hash) { described_class.partitions_count_cache.to_h } - let(:pre_statistics_ttl) { count_cache_hash.fetch(topic, [])[0] } - let(:post_statistics_ttl) { count_cache_hash.fetch(topic, [])[0] } + it "expect not to update ttl on the partitions count cache via statistics" do + @producer = rdkafka_producer_config("statistics.interval.ms": 1_000).producer - context "when using partition key" do - before do - # This call will make a blocking request to the metadata cache - producer.produce( - topic: topic, - payload: "payload headers", - partition_key: "test" - ).wait + # This call will make a blocking request to the 
metadata cache + producer.produce( + topic: topic, + payload: "payload headers", + partition_key: "test" + ).wait - pre_statistics_ttl + count_cache_hash = described_class.partitions_count_cache.to_h + pre_statistics_ttl = count_cache_hash.fetch(topic, [])[0] - # We wait to make sure that statistics are triggered and that there is a refresh - sleep(1.5) + # We wait to make sure that statistics are triggered and that there is a refresh + sleep(1.5) - post_statistics_ttl - end + count_cache_hash = described_class.partitions_count_cache.to_h + post_statistics_ttl = count_cache_hash.fetch(topic, [])[0] - it "expect not to update ttl on the partitions count cache via statistics" do - expect(pre_statistics_ttl).to eq post_statistics_ttl - end + assert_equal pre_statistics_ttl, post_statistics_ttl end - context "when not using partition key" do - before do - # This call will make a blocking request to the metadata cache - producer.produce( - topic: topic, - payload: "payload headers" - ).wait + it "expect not to update ttl on the partitions count cache via anything" do + @producer = rdkafka_producer_config("statistics.interval.ms": 1_000).producer - pre_statistics_ttl + # This call will make a blocking request to the metadata cache + producer.produce( + topic: topic, + payload: "payload headers" + ).wait - # We wait to make sure that statistics are triggered and that there is a refresh - sleep(1.5) + count_cache_hash = described_class.partitions_count_cache.to_h + pre_statistics_ttl = count_cache_hash.fetch(topic, [])[0] - # This should not be populated because stats are not in use - post_statistics_ttl - end + # We wait to make sure that statistics are triggered and that there is a refresh + sleep(1.5) - it "expect not to update ttl on the partitions count cache via anything" do - expect(pre_statistics_ttl).to be_nil - expect(post_statistics_ttl).to be_nil - end + # This should not be populated because stats are not in use + count_cache_hash = described_class.partitions_count_cache.to_h + post_statistics_ttl = count_cache_hash.fetch(topic, [])[0] + + assert_nil pre_statistics_ttl + assert_nil post_statistics_ttl end end @@ -1155,11 +1168,11 @@ def call(_, handle) context "when we create many fibers and close producer in some of them" do it "expect not to crash ruby" do 10.times do |i| - producer = rdkafka_producer_config.producer + fiber_producer = rdkafka_producer_config.producer Fiber.new do GC.start - producer.close + fiber_producer.close end.resume end end @@ -1172,7 +1185,7 @@ def call(_, handle) test_key = "test-key-123" results = {} - all_partitioners.each do |partitioner| + @all_partitioners.each do |partitioner| handle = producer.produce( topic: topic_25, payload: "test payload", @@ -1186,13 +1199,13 @@ def call(_, handle) # Should not all be the same partition (especially not all 0) unique_partitions = results.values.uniq - expect(unique_partitions.size).to be > 1 + assert_operator unique_partitions.size, :>, 1 end end context "empty string partition key" do it "produces message with empty partition key without crashing and go to partition 0 for all partitioners" do - all_partitioners.each do |partitioner| + @all_partitioners.each do |partitioner| handle = producer.produce( topic: topic_25, payload: "test payload", @@ -1202,7 +1215,7 @@ def call(_, handle) ) report = handle.wait(max_wait_timeout_ms: 5_000) - expect(report.partition).to be >= 0 + assert_operator report.partition, :>=, 0 end end end @@ -1217,14 +1230,14 @@ def call(_, handle) ) report = handle.wait(max_wait_timeout_ms: 
5_000) - expect(report.partition).to be >= 0 - expect(report.partition).to be < producer.partition_count(topic_25) + assert_operator report.partition, :>=, 0 + assert_operator report.partition, :<, producer.partition_count(topic_25) end end context "various key types and lengths with different partitioners" do it "handles very short keys with all partitioners" do - all_partitioners.each do |partitioner| + @all_partitioners.each do |partitioner| handle = producer.produce( topic: topic_25, payload: "test payload", @@ -1233,15 +1246,15 @@ def call(_, handle) ) report = handle.wait(max_wait_timeout_ms: 5_000) - expect(report.partition).to be >= 0 - expect(report.partition).to be < producer.partition_count(topic_25) + assert_operator report.partition, :>=, 0 + assert_operator report.partition, :<, producer.partition_count(topic_25) end end it "handles very long keys with all partitioners" do long_key = "a" * 1000 - all_partitioners.each do |partitioner| + @all_partitioners.each do |partitioner| handle = producer.produce( topic: topic_25, payload: "test payload", @@ -1250,15 +1263,15 @@ def call(_, handle) ) report = handle.wait(max_wait_timeout_ms: 5_000) - expect(report.partition).to be >= 0 - expect(report.partition).to be < producer.partition_count(topic_25) + assert_operator report.partition, :>=, 0 + assert_operator report.partition, :<, producer.partition_count(topic_25) end end it "handles unicode keys with all partitioners" do unicode_key = "测试键值🚀" - all_partitioners.each do |partitioner| + @all_partitioners.each do |partitioner| handle = producer.produce( topic: topic_25, payload: "test payload", @@ -1267,8 +1280,8 @@ def call(_, handle) ) report = handle.wait(max_wait_timeout_ms: 5_000) - expect(report.partition).to be >= 0 - expect(report.partition).to be < producer.partition_count(topic_25) + assert_operator report.partition, :>=, 0 + assert_operator report.partition, :<, producer.partition_count(topic_25) end end end @@ -1291,7 +1304,7 @@ def call(_, handle) # All should go to same partition partitions = reports.map(&:partition).uniq - expect(partitions.size).to eq(1) + assert_equal 1, partitions.size end end end @@ -1316,8 +1329,8 @@ def call(_, handle) # Just ensure they're valid partitions partitions.each do |partition| - expect(partition).to be >= 0 - expect(partition).to be < producer.partition_count(topic_25) + assert_operator partition, :>=, 0 + assert_operator partition, :<, producer.partition_count(topic_25) end end end @@ -1327,7 +1340,7 @@ def call(_, handle) it "routes different partition keys to potentially different partitions" do keys = ["key1", "key2", "key3", "key4", "key5"] - all_partitioners.each do |partitioner| + @all_partitioners.each do |partitioner| reports = keys.map do |key| handle = producer.produce( topic: topic_25, @@ -1342,7 +1355,7 @@ def call(_, handle) # Should distribute across multiple partitions for most partitioners # (though some might hash all keys to same partition by chance) - expect(partitions.all? { |p| p >= 0 && p < producer.partition_count(topic_25) }).to be true + assert_equal true, partitions.all? 
       end
     end
   end

@@ -1380,16 +1393,16 @@ def call(_, handle)
       report3 = handle3.wait(max_wait_timeout_ms: 5_000)

       # Messages 1 and 2 should go to same partition (both use partition_key)
-      expect(report1.partition).to eq(report2.partition)
+      assert_equal report1.partition, report2.partition

       # Message 3 should potentially go to different partition (uses regular key)
-      expect(report3.partition).not_to eq(report1.partition)
+      refute_equal report1.partition, report3.partition
     end
   end

   context "edge case combinations with different partitioners" do
     it "handles nil partition key with all partitioners" do
-      all_partitioners.each do |partitioner|
+      @all_partitioners.each do |partitioner|
         handle = producer.produce(
           topic: topic_25,
           payload: "test payload",
@@ -1399,13 +1412,13 @@ def call(_, handle)
         )

         report = handle.wait(max_wait_timeout_ms: 5_000)
-        expect(report.partition).to be >= 0
-        expect(report.partition).to be < producer.partition_count(topic_25)
+        assert_operator report.partition, :>=, 0
+        assert_operator report.partition, :<, producer.partition_count(topic_25)
       end
     end

     it "handles whitespace-only partition key with all partitioners" do
-      all_partitioners.each do |partitioner|
+      @all_partitioners.each do |partitioner|
         handle = producer.produce(
           topic: topic_25,
           payload: "test payload",
@@ -1414,13 +1427,13 @@ def call(_, handle)
         )

         report = handle.wait(max_wait_timeout_ms: 5_000)
-        expect(report.partition).to be >= 0
-        expect(report.partition).to be < producer.partition_count(topic_25)
+        assert_operator report.partition, :>=, 0
+        assert_operator report.partition, :<, producer.partition_count(topic_25)
       end
     end

     it "handles newline characters in partition key with all partitioners" do
-      all_partitioners.each do |partitioner|
+      @all_partitioners.each do |partitioner|
         handle = producer.produce(
           topic: topic_25,
           payload: "test payload",
@@ -1429,8 +1442,8 @@ def call(_, handle)
         )

         report = handle.wait(max_wait_timeout_ms: 5_000)
-        expect(report.partition).to be >= 0
-        expect(report.partition).to be < producer.partition_count(topic_25)
+        assert_operator report.partition, :>=, 0
+        assert_operator report.partition, :<, producer.partition_count(topic_25)
       end
     end
   end
@@ -1440,7 +1453,7 @@ def call(_, handle)
       test_key = "debug-test-key"
       zero_count = 0

-      all_partitioners.each do |partitioner|
+      @all_partitioners.each do |partitioner|
         handle = producer.produce(
           topic: topic_25,
           payload: "debug payload",
@@ -1452,14 +1465,14 @@ def call(_, handle)
         zero_count += 1 if report.partition == 0
       end

-      expect(zero_count).to be < all_partitioners.size
+      assert_operator zero_count, :<, @all_partitioners.size
     end
   end
 end

 describe "#events_poll_nb_each" do
   it "does not raise when queue is empty" do
-    expect { producer.events_poll_nb_each { |_| } }.not_to raise_error
+    producer.events_poll_nb_each { |_| }
   end

   it "processes delivery callbacks" do
@@ -1477,58 +1490,63 @@ def call(_, handle)
     # events_poll_nb_each should process any pending callbacks
     producer.events_poll_nb_each { |_| }

-    expect(callback_called).to be(true)
+    assert_equal true, callback_called
   end

   it "yields the count after each poll" do
     counts = []

-    # Stub to return events, then zero
-    call_count = 0
-    allow(Rdkafka::Bindings).to receive(:rd_kafka_poll_nb) do
-      call_count += 1
-      (call_count <= 2) ? 1 : 0
-    end
+    # Stub two polls that report events, then an empty poll; the consecutive
+    # return values replace the manual call counter
+    Rdkafka::Bindings.stubs(:rd_kafka_poll_nb).returns(1, 1, 0)
     producer.events_poll_nb_each { |count| counts << count }

-    expect(counts).to eq([1, 1])
+    assert_equal [1, 1], counts
   end

   it "stops when block returns :stop" do
     iterations = 0

     # Stub to always return events
-    allow(Rdkafka::Bindings).to receive(:rd_kafka_poll_nb).and_return(1)
+    Rdkafka::Bindings.stubs(:rd_kafka_poll_nb).returns(1)

     producer.events_poll_nb_each do |_count|
       iterations += 1
       :stop if iterations >= 3
     end

-    expect(iterations).to eq(3)
+    assert_equal 3, iterations
   end

   context "when producer is closed" do
     before { producer.close }

     it "raises ClosedProducerError" do
-      expect { producer.events_poll_nb_each { |_| } }.to raise_error(Rdkafka::ClosedProducerError, /events_poll_nb_each/)
+      e = assert_raises(Rdkafka::ClosedProducerError) do
+        producer.events_poll_nb_each { |_| }
+      end
+      assert_match(/events_poll_nb_each/, e.message)
     end
   end
 end

 describe "file descriptor access for fiber scheduler integration" do
-  let(:producer) { rdkafka_producer_config.producer(run_polling_thread: false) }
+  before do
+    @producer = rdkafka_producer_config.producer(run_polling_thread: false)
+  end

   it "enables IO events on producer queue" do
     signal_r, signal_w = IO.pipe
-    expect { producer.enable_queue_io_events(signal_w.fileno) }.not_to raise_error
+    producer.enable_queue_io_events(signal_w.fileno)
     signal_r.close
     signal_w.close
   end

   it "enables IO events on background queue" do
     signal_r, signal_w = IO.pipe
-    expect { producer.enable_background_queue_io_events(signal_w.fileno) }.not_to raise_error
+    producer.enable_background_queue_io_events(signal_w.fileno)
     signal_r.close
     signal_w.close
   end
@@ -1538,14 +1556,18 @@ def call(_, handle)
   it "raises ClosedInnerError when enabling queue_io_events" do
     signal_r, signal_w = IO.pipe
-    expect { producer.enable_queue_io_events(signal_w.fileno) }.to raise_error(Rdkafka::ClosedInnerError)
+    assert_raises(Rdkafka::ClosedInnerError) do
+      producer.enable_queue_io_events(signal_w.fileno)
+    end
     signal_r.close
     signal_w.close
   end

   it "raises ClosedInnerError when enabling background_queue_io_events" do
     signal_r, signal_w = IO.pipe
-    expect { producer.enable_background_queue_io_events(signal_w.fileno) }.to raise_error(Rdkafka::ClosedInnerError)
+    assert_raises(Rdkafka::ClosedInnerError) do
+      producer.enable_background_queue_io_events(signal_w.fileno)
+    end
     signal_r.close
     signal_w.close
   end
diff --git a/spec/support/kafka_config_helpers.rb b/test/support/kafka_config_helpers.rb
similarity index 100%
rename from spec/support/kafka_config_helpers.rb
rename to test/support/kafka_config_helpers.rb
diff --git a/spec/support/kafka_wait_helpers.rb b/test/support/kafka_wait_helpers.rb
similarity index 100%
rename from spec/support/kafka_wait_helpers.rb
rename to test/support/kafka_wait_helpers.rb
diff --git a/spec/support/native_client_helpers.rb b/test/support/native_client_helpers.rb
similarity index 100%
rename from spec/support/native_client_helpers.rb
rename to test/support/native_client_helpers.rb
diff --git a/spec/support/test_topics.rb b/test/support/test_topics.rb
similarity index 100%
rename from spec/support/test_topics.rb
rename to test/support/test_topics.rb
diff --git a/test/test_helper.rb b/test/test_helper.rb
new file mode 100644
index 00000000..d43b91fb
--- /dev/null
+++ b/test/test_helper.rb
@@ -0,0 +1,126 @@
+# frozen_string_literal: true
+
+Warning[:performance] = true if RUBY_VERSION >= "3.3"
+Warning[:deprecated] = true
+$VERBOSE = true
+
+require "warning"
+
+Warning.process do |warning|
+  next unless warning.include?(Dir.pwd)
+  # Allow OpenStruct usage only in tests
+  next if warning.include?("OpenStruct use") && warning.include?("_test")
+
+  raise "Warning in your code: #{warning}"
+end
+
+unless ENV["CI"] == "true"
+  require "simplecov"
+  SimpleCov.start do
+    add_filter "/test/"
+  end
+end
+
+require "minitest/autorun"
+require "mocha/minitest"
+require "pry"
+require "rdkafka"
+require "timeout"
+require "securerandom"
+
+# Load support modules
+require_relative "support/kafka_config_helpers"
+require_relative "support/kafka_wait_helpers"
+require_relative "support/native_client_helpers"
+require_relative "support/test_topics"
+
+# Add `context` as an alias for `describe` in minitest/spec
+Minitest::Spec.class_eval do
+  class << self
+    alias_method :context, :describe
+  end
+end
+
+# Provide `described_class` helper that walks up the Minitest::Spec hierarchy
+module DescribedClassHelper
+  def described_class
+    klass = self.class
+    while klass && klass < Minitest::Spec
+      return klass.desc if klass.desc.is_a?(Class) || klass.desc.is_a?(Module)
+      klass = klass.superclass
+    end
+    nil
+  end
+end
+
+# Provide `with_stubbed_const` for stub_const replacement
+module StubbedConstHelper
+  def with_stubbed_const(mod, const_name, temp_value)
+    old_value = mod.const_get(const_name)
+    mod.send(:remove_const, const_name)
+    mod.const_set(const_name, temp_value)
+    yield
+  ensure
+    mod.send(:remove_const, const_name)
+    mod.const_set(const_name, old_value)
+  end
+end
+
+# 90-second timeout wrapper per test
+module TimeoutWrapper
+  def run
+    Timeout.timeout(90) do
+      super
+    end
+  end
+end
+
+Minitest::Test.prepend(TimeoutWrapper)
+
+# Include support modules into all specs
+Minitest::Spec.class_eval do
+  include KafkaConfigHelpers
+  include KafkaWaitHelpers
+  include NativeClientHelpers
+  include DescribedClassHelper
+  include StubbedConstHelper
+end
+
+# One-time suite setup: create example topic before any tests run
+SUITE_SETUP_MUTEX = Mutex.new
+SUITE_SETUP_DONE = { value: false }
+
+module SuiteSetup
+  def before_setup
+    super
+    SUITE_SETUP_MUTEX.synchronize do
+      unless SUITE_SETUP_DONE[:value]
+        admin = KafkaConfigHelpers.rdkafka_config.admin
+        begin
+          create_topic_handle = admin.create_topic(TestTopics.example_topic, 1, 1)
+          create_topic_handle.wait(max_wait_timeout_ms: 1_000)
+        rescue Rdkafka::RdkafkaError => ex
+          raise unless ex.message.match?(/topic_already_exists/)
+        ensure
+          admin.close
+        end
+        SUITE_SETUP_DONE[:value] = true
+      end
+    end
+  end
+end
+
+Minitest::Spec.prepend(SuiteSetup)
+
+# Global before each: clear callbacks and partition cache
+module GlobalBeforeEach
+  def setup
+    super
+    Rdkafka::Config.statistics_callback = nil
+    Rdkafka::Config.error_callback = nil
+    Rdkafka::Config.oauthbearer_token_refresh_callback = nil
+    Rdkafka::Producer.partitions_count_cache.to_h.clear
+  end
+end
+
+Minitest::Spec.prepend(GlobalBeforeEach)

From 071dbc35b75c7b10d3f367c334d337f723783ad0 Mon Sep 17 00:00:00 2001
From: Maciej Mensfeld
Date: Wed, 18 Mar 2026 13:37:23 +0100
Subject: [PATCH 2/3] switch to minitest

---
 test/integrations/librdkafka_admin_features_test.rb    | 10 +++++-----
 test/integrations/librdkafka_consumer_features_test.rb | 10 +++++-----
 test/integrations/librdkafka_producer_features_test.rb | 10 +++++-----
 test/integrations/unregistered_scheme_file_test.rb     |  4 ++--
 test/lib/rdkafka/consumer/headers_test.rb              |  2 +-
 5 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/test/integrations/librdkafka_admin_features_test.rb b/test/integrations/librdkafka_admin_features_test.rb
index 0cca7d7a..d21383a5 100644
--- a/test/integrations/librdkafka_admin_features_test.rb
+++ b/test/integrations/librdkafka_admin_features_test.rb
@@ -70,14 +70,14 @@
     # Verify all expected features are present
     expected = if ENV["RDKAFKA_PRECOMPILED"] == "true"
-                 ADMIN_PRECOMPILED_FEATURES
-               else
-                 ADMIN_EXPECTED_BUILTIN_FEATURES
-               end
+      ADMIN_PRECOMPILED_FEATURES
+    else
+      ADMIN_EXPECTED_BUILTIN_FEATURES
+    end

     missing_features = expected - actual_features

     assert_empty missing_features,
-                 "Missing expected builtin features: #{missing_features.join(', ')}"
+      "Missing expected builtin features: #{missing_features.join(", ")}"
   end
 end
diff --git a/test/integrations/librdkafka_consumer_features_test.rb b/test/integrations/librdkafka_consumer_features_test.rb
index 55bfa051..23d50993 100644
--- a/test/integrations/librdkafka_consumer_features_test.rb
+++ b/test/integrations/librdkafka_consumer_features_test.rb
@@ -71,14 +71,14 @@
     # Verify all expected features are present
     expected = if ENV["RDKAFKA_PRECOMPILED"] == "true"
-                 CONSUMER_PRECOMPILED_FEATURES
-               else
-                 CONSUMER_EXPECTED_BUILTIN_FEATURES
-               end
+      CONSUMER_PRECOMPILED_FEATURES
+    else
+      CONSUMER_EXPECTED_BUILTIN_FEATURES
+    end

     missing_features = expected - actual_features

     assert_empty missing_features,
-                 "Missing expected builtin features: #{missing_features.join(', ')}"
+      "Missing expected builtin features: #{missing_features.join(", ")}"
   end
 end
diff --git a/test/integrations/librdkafka_producer_features_test.rb b/test/integrations/librdkafka_producer_features_test.rb
index 2339bfee..3cef6d3c 100644
--- a/test/integrations/librdkafka_producer_features_test.rb
+++ b/test/integrations/librdkafka_producer_features_test.rb
@@ -70,14 +70,14 @@
     # Verify all expected features are present
     expected = if ENV["RDKAFKA_PRECOMPILED"] == "true"
-                 PRODUCER_PRECOMPILED_FEATURES
-               else
-                 PRODUCER_EXPECTED_BUILTIN_FEATURES
-               end
+      PRODUCER_PRECOMPILED_FEATURES
+    else
+      PRODUCER_EXPECTED_BUILTIN_FEATURES
+    end

     missing_features = expected - actual_features

     assert_empty missing_features,
-                 "Missing expected builtin features: #{missing_features.join(', ')}"
+      "Missing expected builtin features: #{missing_features.join(", ")}"
   end
 end
diff --git a/test/integrations/unregistered_scheme_file_test.rb b/test/integrations/unregistered_scheme_file_test.rb
index d3a02102..d74b9bfd 100644
--- a/test/integrations/unregistered_scheme_file_test.rb
+++ b/test/integrations/unregistered_scheme_file_test.rb
@@ -109,9 +109,9 @@
     log_lines.each do |line|
       refute_includes line, "routines::unregistered scheme",
-                      "Found 'unregistered scheme' error in SSL logs"
+        "Found 'unregistered scheme' error in SSL logs"
       refute_includes line, "system library::No such file or directory",
-                      "Found 'No such file or directory' error in SSL logs"
+        "Found 'No such file or directory' error in SSL logs"
     end
   end
 end
diff --git a/test/lib/rdkafka/consumer/headers_test.rb b/test/lib/rdkafka/consumer/headers_test.rb
index 8388b4a8..d7959f4f 100644
--- a/test/lib/rdkafka/consumer/headers_test.rb
+++ b/test/lib/rdkafka/consumer/headers_test.rb
@@ -22,7 +22,7 @@
     header_data = [
       { name: "version", value: "2.1.3" },
       { name: "version", value: "2.1.4" },
-      { name: "type",    value: "String" }
+      { name: "type", value: "String" }
     ]

     headers_ptr = @headers_ptr

From d969951ce73286655ea9962a84f7a66ce4a44525 Mon Sep 17 00:00:00 2001
From: Maciej Mensfeld
Date: Thu, 19 Mar 2026 09:05:10 +0100
Subject: [PATCH 3/3] wip

---
 .../librdkafka_admin_features_test.rb    |  91 ++++-----
 .../librdkafka_consumer_features_test.rb |  93 ++++-----
 .../librdkafka_producer_features_test.rb |  91 ++++-----
 test/integrations/ssl_stress_test.rb     | 162 +++++++--------
 .../unregistered_scheme_file_test.rb     | 185 +++++++---------
 test/test_helper.rb                      |  10 +
 6 files changed, 316 insertions(+), 316 deletions(-)

diff --git a/test/integrations/librdkafka_admin_features_test.rb b/test/integrations/librdkafka_admin_features_test.rb
index d21383a5..7ff0e6fd 100644
--- a/test/integrations/librdkafka_admin_features_test.rb
+++ b/test/integrations/librdkafka_admin_features_test.rb
@@ -2,12 +2,19 @@
 # This integration test verifies that librdkafka admin is compiled with all expected builtin features.
 # These features are critical for Karafka and rdkafka-ruby to function properly.
+#
+# Exit codes:
+#   - 0: All expected features found (test passes)
+#   - 1: Missing expected features or parsing failed (test fails)

-require_relative "../test_helper"
+require "rdkafka"
+require "logger"
 require "stringio"

+$stdout.sync = true
+
 # Expected features that should be present in our compiled librdkafka
-ADMIN_EXPECTED_BUILTIN_FEATURES = %w[
+EXPECTED_BUILTIN_FEATURES = %w[
   gzip
   snappy
   ssl
@@ -22,62 +29,56 @@
 ].freeze

 # Precompiled builds include GSSAPI (via MIT Kerberos + Cyrus SASL)
-ADMIN_PRECOMPILED_FEATURES = (ADMIN_EXPECTED_BUILTIN_FEATURES + %w[sasl_gssapi]).freeze
+PRECOMPILED_FEATURES = (EXPECTED_BUILTIN_FEATURES + %w[sasl_gssapi]).freeze
+
+captured_output = StringIO.new
+logger = Logger.new(captured_output)
+logger.level = Logger::DEBUG

-describe "Librdkafka Admin Features" do
-  before do
-    @captured_output = StringIO.new
-    @logger = Logger.new(@captured_output)
-    @logger.level = Logger::DEBUG
+Rdkafka::Config.logger = logger
+Rdkafka::Config.ensure_log_thread

-    @original_logger = Rdkafka::Config.logger
-    Rdkafka::Config.logger = @logger
-    Rdkafka::Config.ensure_log_thread
+config = Rdkafka::Config.new(
+  "bootstrap.servers": "localhost:9092",
+  "client.id": "admin-feature-test",
+  debug: "all"
+)

-    config = Rdkafka::Config.new(
-      "bootstrap.servers": "localhost:9092",
-      "client.id": "admin-feature-test",
-      debug: "all"
-    )
+admin = config.admin

-    @admin = config.admin
+# Wait for log messages to be processed
+sleep 2

-    # Wait for log messages to be processed
-    sleep 2
-  end
+admin.close

-  after do
-    @admin&.close
-    Rdkafka::Config.logger = @original_logger
-  end
+# Get all log output
+log_content = captured_output.string

-  it "includes all expected builtin features in admin client logs" do
-    log_content = @captured_output.string
+# Find the initialization line that contains builtin.features
+feature_line = log_content.lines.find { |line| line.include?("builtin.features") }

-    # Find the initialization line that contains builtin.features
-    feature_line = log_content.lines.find { |line| line.include?("builtin.features") }
+unless feature_line
-    refute_nil feature_line, "Could not find builtin.features in admin log output"
+  puts "Could not find builtin.features in admin log output"
+  exit(1)
+end

-    # Extract the features list from the line
-    # Format: "... (builtin.features gzip,snappy,ssl,..., ...)"
+match = feature_line.match(/builtin\.features\s+([^,]+(?:,[^,\s]+)*)/i)

-    refute_nil match, "Could not parse builtin.features from log line: #{feature_line}"
+unless match
+  puts "Could not parse builtin.features from log line: #{feature_line}"
+  exit(1)
+end

-    features_string = match[1]
-    actual_features = features_string.split(",").map(&:strip)
+features_string = match[1]
+actual_features = features_string.split(",").map(&:strip)

-    # Verify all expected features are present
-    expected = if ENV["RDKAFKA_PRECOMPILED"] == "true"
-      ADMIN_PRECOMPILED_FEATURES
-    else
-      ADMIN_EXPECTED_BUILTIN_FEATURES
-    end
+# Verify all expected features are present
+expected = (ENV["RDKAFKA_PRECOMPILED"] == "true") ? PRECOMPILED_FEATURES : EXPECTED_BUILTIN_FEATURES
+missing_features = expected - actual_features

-    missing_features = expected - actual_features
+if missing_features.any?
-    assert_empty missing_features,
-      "Missing expected builtin features: #{missing_features.join(", ")}"
+  puts "Missing expected builtin features: #{missing_features.join(", ")}"
+  exit(1)
+end
diff --git a/test/integrations/librdkafka_consumer_features_test.rb b/test/integrations/librdkafka_consumer_features_test.rb
index 23d50993..2ff0edcd 100644
--- a/test/integrations/librdkafka_consumer_features_test.rb
+++ b/test/integrations/librdkafka_consumer_features_test.rb
@@ -2,12 +2,19 @@
 # This integration test verifies that librdkafka consumer is compiled with all expected builtin features.
 # These features are critical for Karafka and rdkafka-ruby to function properly.
+#
+# Exit codes:
+#   - 0: All expected features found (test passes)
+#   - 1: Missing expected features or parsing failed (test fails)

-require_relative "../test_helper"
+require "rdkafka"
+require "logger"
 require "stringio"

+$stdout.sync = true
+
 # Expected features that should be present in our compiled librdkafka
-CONSUMER_EXPECTED_BUILTIN_FEATURES = %w[
+EXPECTED_BUILTIN_FEATURES = %w[
   gzip
   snappy
   ssl
@@ -22,63 +29,57 @@
 ].freeze

 # Precompiled builds include GSSAPI (via MIT Kerberos + Cyrus SASL)
-CONSUMER_PRECOMPILED_FEATURES = (CONSUMER_EXPECTED_BUILTIN_FEATURES + %w[sasl_gssapi]).freeze
+PRECOMPILED_FEATURES = (EXPECTED_BUILTIN_FEATURES + %w[sasl_gssapi]).freeze
+
+captured_output = StringIO.new
+logger = Logger.new(captured_output)
+logger.level = Logger::DEBUG

-describe "Librdkafka Consumer Features" do
-  before do
-    @captured_output = StringIO.new
-    @logger = Logger.new(@captured_output)
-    @logger.level = Logger::DEBUG
+Rdkafka::Config.logger = logger
+Rdkafka::Config.ensure_log_thread

-    @original_logger = Rdkafka::Config.logger
-    Rdkafka::Config.logger = @logger
-    Rdkafka::Config.ensure_log_thread
+config = Rdkafka::Config.new(
+  "bootstrap.servers": "localhost:9092",
+  "client.id": "consumer-feature-test",
+  "group.id": "feature-test-group",
+  debug: "all"
+)

-    config = Rdkafka::Config.new(
-      "bootstrap.servers": "localhost:9092",
-      "client.id": "consumer-feature-test",
-      "group.id": "feature-test-group",
-      debug: "all"
-    )
+consumer = config.consumer

-    @consumer = config.consumer
+# Wait for log messages to be processed
+sleep 2

-    # Wait for log messages to be processed
-    sleep 2
-  end
+consumer.close

-  after do
-    @consumer&.close
-    Rdkafka::Config.logger = @original_logger
-  end
+# Get all log output
+log_content = captured_output.string

-  it "includes all expected builtin features in consumer client logs" do
-    log_content = @captured_output.string
+# Find the initialization line that contains builtin.features
+feature_line = log_content.lines.find { |line| line.include?("builtin.features") }

-    # Find the initialization line that contains builtin.features
-    feature_line = log_content.lines.find { |line| line.include?("builtin.features") }
+unless feature_line
-    refute_nil feature_line, "Could not find builtin.features in consumer log output"
+  puts "Could not find builtin.features in consumer log output"
+  exit(1)
+end

-    # Extract the features list from the line
-    # Format: "... (builtin.features gzip,snappy,ssl,..., ...)"
+match = feature_line.match(/builtin\.features\s+([^,]+(?:,[^,\s]+)*)/i)

-    refute_nil match, "Could not parse builtin.features from log line: #{feature_line}"
+unless match
+  puts "Could not parse builtin.features from log line: #{feature_line}"
+  exit(1)
+end

-    features_string = match[1]
-    actual_features = features_string.split(",").map(&:strip)
+features_string = match[1]
+actual_features = features_string.split(",").map(&:strip)

-    # Verify all expected features are present
-    expected = if ENV["RDKAFKA_PRECOMPILED"] == "true"
-      CONSUMER_PRECOMPILED_FEATURES
-    else
-      CONSUMER_EXPECTED_BUILTIN_FEATURES
-    end
+# Verify all expected features are present
+expected = (ENV["RDKAFKA_PRECOMPILED"] == "true") ? PRECOMPILED_FEATURES : EXPECTED_BUILTIN_FEATURES
+missing_features = expected - actual_features

-    missing_features = expected - actual_features
+if missing_features.any?
-    assert_empty missing_features,
-      "Missing expected builtin features: #{missing_features.join(", ")}"
+  puts "Missing expected builtin features: #{missing_features.join(", ")}"
+  exit(1)
+end
diff --git a/test/integrations/librdkafka_producer_features_test.rb b/test/integrations/librdkafka_producer_features_test.rb
index 2339bfee..f21e6306 100644
--- a/test/integrations/librdkafka_producer_features_test.rb
+++ b/test/integrations/librdkafka_producer_features_test.rb
@@ -2,12 +2,19 @@
 # This integration test verifies that librdkafka producer is compiled with all expected builtin features.
 # These features are critical for Karafka and rdkafka-ruby to function properly.
+#
+# Exit codes:
+#   - 0: All expected features found (test passes)
+#   - 1: Missing expected features or parsing failed (test fails)

-require_relative "../test_helper"
+require "rdkafka"
+require "logger"
 require "stringio"

+$stdout.sync = true
+
 # Expected features that should be present in our compiled librdkafka
-PRODUCER_EXPECTED_BUILTIN_FEATURES = %w[
+EXPECTED_BUILTIN_FEATURES = %w[
   gzip
   snappy
   ssl
@@ -22,62 +29,56 @@
 ].freeze

 # Precompiled builds include GSSAPI (via MIT Kerberos + Cyrus SASL)
-PRODUCER_PRECOMPILED_FEATURES = (PRODUCER_EXPECTED_BUILTIN_FEATURES + %w[sasl_gssapi]).freeze
+PRECOMPILED_FEATURES = (EXPECTED_BUILTIN_FEATURES + %w[sasl_gssapi]).freeze
+
+captured_output = StringIO.new
+logger = Logger.new(captured_output)
+logger.level = Logger::DEBUG

-describe "Librdkafka Producer Features" do
-  before do
-    @captured_output = StringIO.new
-    @logger = Logger.new(@captured_output)
-    @logger.level = Logger::DEBUG
+Rdkafka::Config.logger = logger
+Rdkafka::Config.ensure_log_thread

-    @original_logger = Rdkafka::Config.logger
-    Rdkafka::Config.logger = @logger
-    Rdkafka::Config.ensure_log_thread
+config = Rdkafka::Config.new(
+  "bootstrap.servers": "localhost:9092",
+  "client.id": "producer-feature-test",
+  debug: "all"
+)

-    config = Rdkafka::Config.new(
-      "bootstrap.servers": "localhost:9092",
-      "client.id": "producer-feature-test",
-      debug: "all"
-    )
+producer = config.producer

-    @producer = config.producer
+# Wait for log messages to be processed
+sleep 2

-    # Wait for log messages to be processed
-    sleep 2
-  end
+producer.close

-  after do
-    @producer&.close
-    Rdkafka::Config.logger = @original_logger
-  end
+# Get all log output
+log_content = captured_output.string

-  it "includes all expected builtin features in producer client logs" do
-    log_content = @captured_output.string
+# Find the initialization line that contains builtin.features
+feature_line = log_content.lines.find { |line| line.include?("builtin.features") }

-    # Find the initialization line that contains builtin.features
-    feature_line = log_content.lines.find { |line| line.include?("builtin.features") }
+unless feature_line
-    refute_nil feature_line, "Could not find builtin.features in producer log output"
+  puts "Could not find builtin.features in producer log output"
+  exit(1)
+end

-    # Extract the features list from the line
-    # Format: "... (builtin.features gzip,snappy,ssl,..., ...)"
+match = feature_line.match(/builtin\.features\s+([^,]+(?:,[^,\s]+)*)/i)

-    refute_nil match, "Could not parse builtin.features from log line: #{feature_line}"
+unless match
+  puts "Could not parse builtin.features from log line: #{feature_line}"
+  exit(1)
+end

-    features_string = match[1]
-    actual_features = features_string.split(",").map(&:strip)
+features_string = match[1]
+actual_features = features_string.split(",").map(&:strip)

-    # Verify all expected features are present
-    expected = if ENV["RDKAFKA_PRECOMPILED"] == "true"
-      PRODUCER_PRECOMPILED_FEATURES
-    else
-      PRODUCER_EXPECTED_BUILTIN_FEATURES
-    end
+# Verify all expected features are present
+expected = (ENV["RDKAFKA_PRECOMPILED"] == "true") ? PRECOMPILED_FEATURES : EXPECTED_BUILTIN_FEATURES
+missing_features = expected - actual_features

-    missing_features = expected - actual_features
+if missing_features.any?
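+  # Assumed diagnostic, not in the original script: print the gap before the
+  # non-zero exit, reusing the message from the assertion being replaced, so a
+  # failing exit code is explainable from the script output alone.
+  puts "Missing expected builtin features: #{missing_features.join(", ")}"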
-    assert_empty missing_features,
-      "Missing expected builtin features: #{missing_features.join(", ")}"
+  exit(1)
+end
diff --git a/test/integrations/ssl_stress_test.rb b/test/integrations/ssl_stress_test.rb
index 04e95847..4112ae51 100644
--- a/test/integrations/ssl_stress_test.rb
+++ b/test/integrations/ssl_stress_test.rb
@@ -1,6 +1,6 @@
-# frozen_string_literal: true
-
-# This integration test is designed to stress-test the OpenSSL SSL/TLS layer under high concurrency
+# ssl_stress_test.rb
+#
+# This script is designed to stress-test the OpenSSL SSL/TLS layer under high concurrency
 # to help detect regressions like the one described in OpenSSL issue #28171:
 # https://github.com/openssl/openssl/issues/28171
 #
@@ -24,103 +24,89 @@
 #
 # In case of a failure, segfault will happen

-require_relative "../test_helper"
+require "rdkafka"
 require "socket"
 require "openssl"

-describe "SSL Stress Test" do
-  before do
-    @starting_port = 19093
-    @num_ports = 150
-    @batches = 100
-    @ports = @starting_port...(@starting_port + @num_ports)
-
-    @config = {
-      "bootstrap.servers": Array.new(@num_ports) { |i| "127.0.0.1:#{@starting_port + i}" }.join(","),
-      "security.protocol": "SSL",
-      "enable.ssl.certificate.verification": false
-    }
-
-    # Generate in-memory self-signed cert
-    @key = OpenSSL::PKey::RSA.new(2048)
-
-    name = OpenSSL::X509::Name.parse("/CN=127.0.0.1")
-    @cert = OpenSSL::X509::Certificate.new
-    @cert.version = 2
-    @cert.serial = 1
-    @cert.subject = name
-    @cert.issuer = name
-    @cert.public_key = @key.public_key
-    @cert.not_before = Time.now
-    @cert.not_after = Time.now + 3600
-    @cert.sign(@key, OpenSSL::Digest.new("SHA256"))
-
-    # Start servers on multiple ports
-    @server_threads = @ports.map do |port|
-      Thread.new do
-        # Prepare SSL context
-        # We do not use a shared context for the server because the goal is to stress librdkafka
-        # layer and not the Ruby SSL layer
-        ssl_context = OpenSSL::SSL::SSLContext.new
-        ssl_context.cert = @cert
-        ssl_context.key = @key
-
-        tcp_server = TCPServer.new("127.0.0.1", port)
-        ssl_server = OpenSSL::SSL::SSLServer.new(tcp_server, ssl_context)
-
-        loop do
-          ssl_socket = ssl_server.accept
-          ssl_socket.close
-        rescue => e
-          # Some errors are expected and irrelevant
-          next if e.message.include?("unexpected eof while reading")
-        end
-      end
-    end
-
-    # Wait for the servers to be available
-    # We want to make sure that they are available so we are sure that librdkafka actually
-    # hammers them
-    timeout = 30
-    start = Time.now
+$stdout.sync = true
+
+STARTING_PORT = 19093
+NUM_PORTS = 150
+BATCHES = 100
+PORTS = STARTING_PORT...(STARTING_PORT + NUM_PORTS)
+
+CONFIG = {
+  "bootstrap.servers": Array.new(NUM_PORTS) { |i| "127.0.0.1:#{STARTING_PORT + i}" }.join(","),
+  "security.protocol": "SSL",
+  "enable.ssl.certificate.verification": false
+}
+
+# Generate in-memory self-signed cert
+key = OpenSSL::PKey::RSA.new(2048)
+
+name = OpenSSL::X509::Name.parse("/CN=127.0.0.1")
+cert = OpenSSL::X509::Certificate.new
+cert.version = 2
+cert.serial = 1
+cert.subject = name
+cert.issuer = name
+cert.public_key = key.public_key
+cert.not_before = Time.now
+cert.not_after = Time.now + 3600
+cert.sign(key, OpenSSL::Digest.new("SHA256"))
+
+# Start servers on multiple ports
+PORTS.each do |port|
+  Thread.new do
+    # Prepare SSL context
+    # We do not use a shared context for the server because the goal is to stress librdkafka layer
+    # and not the Ruby SSL layer
+    ssl_context = OpenSSL::SSL::SSLContext.new
+    ssl_context.cert = cert
+    ssl_context.key = key
+
+    tcp_server = TCPServer.new("127.0.0.1", port)
+    ssl_server = OpenSSL::SSL::SSLServer.new(tcp_server, ssl_context)
+
+    loop do
+      ssl_socket = ssl_server.accept
+      ssl_socket.close
+    rescue => e
+      # Some errors are expected and irrelevant
+      next if e.message.include?("unexpected eof while reading")
+    end
+  end
+end

-    loop do
-      all_up = @ports.all? do |port|
-        TCPSocket.new("127.0.0.1", port).close
-        true
-      rescue
-        false
-      end
-
-      break if all_up
-
-      raise "Timeout waiting for SSL servers" if Time.now - start > timeout
-
-      sleep 0.1
-    end
-  end
-
-  after do
-    @server_threads&.each(&:kill)
-  end
+timeout = 30
+start = Time.now
+
+# Wait for the servers to be available
+# We want to make sure that they are available so we are sure that librdkafka actually hammers
+# them
+loop do
+  all_up = PORTS.all? do |port|
+    TCPSocket.new("127.0.0.1", port).close
+    true
+  rescue
+    false
+  end

-  it "does not crash under heavy concurrent SSL connection churn" do
-    start_time = Time.now
-    duration = 60 * 10 # 10 minutes - it should crash faster than that if SSL vulnerable
+  break if all_up
+
+  raise "Timeout waiting for SSL servers" if Time.now - start > timeout

-    while Time.now - start_time < duration
-      consumers = Array.new(@batches) do
-        Rdkafka::Config.new(@config).consumer
-      end
+  sleep 0.1
+end

-      # This print is needed. No idea why but it increases the chances of segfault
-      $stdout.print ""
+start_time = Time.now
+duration = 60 * 10 # 10 minutes - it should crash faster than that if SSL vulnerable

-      sleep(1)
-      consumers.each(&:close)
-    end
+while Time.now - start_time < duration
+  configs = Array.new(BATCHES) { Rdkafka::Config.new(CONFIG) }
+  consumers = configs.map(&:consumer)

-    # If we reach here without segfault, the test passes
-    pass
-  end
+  # This print is needed. No idea why but it increases the chances of segfault
+  $stdout.print ""
+
+  sleep(1)
+  consumers.each(&:close)
 end
diff --git a/test/integrations/unregistered_scheme_file_test.rb b/test/integrations/unregistered_scheme_file_test.rb
index d74b9bfd..dd1a7c4c 100644
--- a/test/integrations/unregistered_scheme_file_test.rb
+++ b/test/integrations/unregistered_scheme_file_test.rb
@@ -8,110 +8,111 @@
 # These errors occur when rdkafka's underlying OpenSSL library encounters issues
 # with SSL certificate validation, particularly related to file scheme handling
 # and missing certificate directories.
+#
+# Exit codes:
+#   - 0: Target error messages NOT detected after 5 seconds (test passes - errors absent)
+#   - 1: Target error messages detected (test fails - the SSL errors are present)
+#   - 2: Unexpected exception occurred during test execution

-require_relative "../test_helper"
+require "rdkafka"
 require "socket"
 require "openssl"
 require "stringio"
+require "logger"
+
+$stdout.sync = true
+
+captured_output = StringIO.new
+Rdkafka::Config.logger = Logger.new(captured_output)
+
+# Start a dummy SSL server with self-signed certificate
+ssl_server_thread = Thread.new do
+  # Create TCP server
+  tcp_server = TCPServer.new("localhost", 9099)
+
+  # Generate self-signed certificate
+  key = OpenSSL::PKey::RSA.new(2048)
+  cert = OpenSSL::X509::Certificate.new
+  cert.version = 2
+  cert.serial = 1
+  cert.subject = OpenSSL::X509::Name.parse("/DC=org/DC=ruby-test/CN=localhost")
+  cert.issuer = cert.subject
+  cert.public_key = key.public_key
+  cert.not_before = Time.now
+  cert.not_after = cert.not_before + 365 * 24 * 60 * 60 # 1 year
+
+  # Add extensions
+  ef = OpenSSL::X509::ExtensionFactory.new
+  ef.subject_certificate = cert
+  ef.issuer_certificate = cert
+  cert.add_extension(ef.create_extension("basicConstraints", "CA:TRUE", true))
+  cert.add_extension(ef.create_extension("keyUsage", "keyCertSign, cRLSign", true))
+  cert.add_extension(ef.create_extension("subjectKeyIdentifier", "hash", false))
+  cert.add_extension(ef.create_extension("authorityKeyIdentifier", "keyid:always", false))
+
+  cert.sign(key, OpenSSL::Digest.new("SHA256"))
+
+  # Create SSL context
+  ssl_context = OpenSSL::SSL::SSLContext.new
+  ssl_context.cert = cert
+  ssl_context.key = key
+
+  # Wrap TCP server with SSL
+  ssl_server = OpenSSL::SSL::SSLServer.new(tcp_server, ssl_context)
+
+  loop do
+    client = ssl_server.accept
+    client.puts("Invalid Kafka broker")
+    client.close
+  rescue
+    # Ignore SSL server errors - they're expected
+  end
+rescue
+  # Ignore thread-level errors
+end

-describe "Unregistered Scheme File" do
-  before do
-    @captured_output = StringIO.new
-    @original_logger = Rdkafka::Config.logger
-    Rdkafka::Config.logger = Logger.new(@captured_output)
-
-    # Start a dummy SSL server with self-signed certificate
-    @ssl_server_thread = Thread.new do
-      # Create TCP server
-      tcp_server = TCPServer.new("localhost", 9099)
-
-      # Generate self-signed certificate
-      key = OpenSSL::PKey::RSA.new(2048)
-      cert = OpenSSL::X509::Certificate.new
-      cert.version = 2
-      cert.serial = 1
-      cert.subject = OpenSSL::X509::Name.parse("/DC=org/DC=ruby-test/CN=localhost")
-      cert.issuer = cert.subject
-      cert.public_key = key.public_key
-      cert.not_before = Time.now
-      cert.not_after = cert.not_before + 365 * 24 * 60 * 60 # 1 year
-
-      # Add extensions
-      ef = OpenSSL::X509::ExtensionFactory.new
-      ef.subject_certificate = cert
-      ef.issuer_certificate = cert
-      cert.add_extension(ef.create_extension("basicConstraints", "CA:TRUE", true))
-      cert.add_extension(ef.create_extension("keyUsage", "keyCertSign, cRLSign", true))
-      cert.add_extension(ef.create_extension("subjectKeyIdentifier", "hash", false))
-      cert.add_extension(ef.create_extension("authorityKeyIdentifier", "keyid:always", false))
-
-      cert.sign(key, OpenSSL::Digest.new("SHA256"))
-
-      # Create SSL context
-      ssl_context = OpenSSL::SSL::SSLContext.new
-      ssl_context.cert = cert
-      ssl_context.key = key
-
-      # Wrap TCP server with SSL
-      ssl_server = OpenSSL::SSL::SSLServer.new(tcp_server, ssl_context)
-
-      loop do
-        client = ssl_server.accept
-        client.puts("Invalid Kafka broker")
-        client.close
-      rescue
-        # Ignore SSL server errors - they're expected
-      end
-    rescue
-      # Ignore thread-level errors
-    end
-
-    # Give the server time to start
-    sleep 1
-
-    config = Rdkafka::Config.new(
-      "bootstrap.servers": "localhost:9099",
-      "security.protocol": "SSL",
-      "client.id": "test-client",
-      "group.id": "test-group"
-    )
+# Give the server time to start
+sleep 1

-    @consumer = config.consumer
-  end
+# Try connecting to the dummy SSL server
+config = Rdkafka::Config.new(
+  "bootstrap.servers": "localhost:9099",
+  "security.protocol": "SSL",
+  "client.id": "test-client",
+  "group.id": "test-group"
+)

-  after do
-    @consumer&.close
-    @ssl_server_thread&.kill
-    Rdkafka::Config.logger = @original_logger
-  end
+begin
+  consumer = config.consumer

-  it "does not produce unregistered scheme or missing file errors in SSL logs" do
-    @consumer.subscribe("test-topic")
+  consumer.subscribe("test-topic")

-    # Try to poll for messages - this triggers SSL errors
-    start_time = Time.now
-    timeout = 5
+  # Try to poll for messages - this triggers SSL errors
+  start_time = Time.now
+  timeout = 5

-    while Time.now - start_time < timeout
-      begin
-        @consumer.poll(1000)
-      rescue
-        break
-      end
+  while Time.now - start_time < timeout
+    begin
+      consumer.poll(1000)
+    rescue
+      break
     end
+  end

-    # Wait for rdkafka to finish logging errors
-    sleep 2
-
-    # Check captured logs for target error patterns
-    @captured_output.rewind
-    log_lines = @captured_output.readlines
+  # Wait for rdkafka to finish logging errors
+  sleep 2

-    log_lines.each do |line|
-      refute_includes line, "routines::unregistered scheme",
-        "Found 'unregistered scheme' error in SSL logs"
-      refute_includes line, "system library::No such file or directory",
-        "Found 'No such file or directory' error in SSL logs"
-    end
+  # Check captured logs for target error patterns
+  captured_output.rewind
+  captured_output.readlines.each do |line|
+    exit(1) if line.include?("routines::unregistered scheme")
+    exit(1) if line.include?("system library::No such file or directory")
   end
+rescue
+  exit(2)
+ensure
+  consumer&.close if defined?(consumer)
+  ssl_server_thread&.kill
 end
+
+# Exit with 0 if target errors not detected
+exit(0)
diff --git a/test/test_helper.rb b/test/test_helper.rb
index d43b91fb..5f0e579b 100644
--- a/test/test_helper.rb
+++ b/test/test_helper.rb
@@ -124,3 +124,13 @@ def setup
 end

 Minitest::Spec.prepend(GlobalBeforeEach)
+
+# Force GC after all tests to trigger native kafka handle finalizers in an orderly fashion.
+# Without this, finalizers run during Ruby's shutdown phase and can race with librdkafka's
+# internal cleanup, causing segfaults (exit code 139) when run via Minitest::TestTask.
+Minitest.after_run do
+  # Multiple GC passes to ensure all weak-ref and nested native objects are collected
+  3.times { GC.start }
+  # Give librdkafka background threads time to finish cleanup
+  sleep 1
+end