diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index f31fd2f3be77f..ae09ccf4480fe 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -33,12 +33,14 @@ jobs: optional_macros: CCFLAGS+=-std=c++20 platform_file: include $(ACE_ROOT)/include/makeinclude/platform_linux.GNU os: ubuntu-24.04 - - CC: gcc-14 + - feature: ACE full tests + CC: gcc-14 CXX: g++-14 - PackageDeps: g++-14 - optional_macros: CCFLAGS+=-std=c++20 + PackageDeps: g++-14 liburing-dev + optional_macros: c++20=1 platform_file: include $(ACE_ROOT)/include/makeinclude/platform_linux.GNU os: ubuntu-24.04 + full_test_suite: true - CC: clang-12 CXX: clang++-12 PackageDeps: clang-12 @@ -136,6 +138,11 @@ jobs: '${{ matrix.optional_macros }}' >> ${env:ACE_ROOT}/include/makeinclude/platform_macros.GNU shell: pwsh if: matrix.optional_macros != '' + - name: enable io_uring for ACE full test suite + run: | + 'uring=1' >> ${env:ACE_ROOT}/include/makeinclude/platform_macros.GNU + shell: pwsh + if: matrix.full_test_suite == true - name: extend $ACE_ROOT/include/makeinclude/platform_macros.GNU run: | '${{ matrix.platform_file }}' >> ${env:ACE_ROOT}/include/makeinclude/platform_macros.GNU @@ -152,6 +159,11 @@ jobs: '${{ matrix.optional_feature }}' >> ${env:ACE_ROOT}/bin/MakeProjectCreator/config/default.features if: matrix.optional_feature != '' shell: pwsh + - name: enable io_uring MPC feature for ACE full test suite + run: | + 'uring=1' >> ${env:ACE_ROOT}/bin/MakeProjectCreator/config/default.features + shell: pwsh + if: matrix.full_test_suite == true - name: initialize CodeQL uses: github/codeql-action/init@v3 with: @@ -182,6 +194,278 @@ jobs: make -j 6 -C ${env:TAO_ROOT}/tests/IDL_Test shell: pwsh if: matrix.feature != 'CORBA/e micro' + - name: Probe io_uring availability for ACE full test suite + id: full_suite_uring_probe + shell: bash + working-directory: ${{ env.ACE_ROOT }}/tests + if: matrix.full_test_suite == true + run: | + set -euo pipefail + export 
LD_LIBRARY_PATH="${ACE_ROOT}/lib:${ACE_ROOT}/tests:${LD_LIBRARY_PATH:-}" + set +e + BASE_PORT=27000 RUN_ID=gha-full-suite-uring-probe TIMEOUT_SECS=90 perl ./run_proactor_correctness_matrix.pl \ + --test Proactor_Contract_Test \ + --backend uring + rc=$? + set -e + if [ "$rc" -eq 0 ]; then + echo "available=true" >> "$GITHUB_OUTPUT" + exit 0 + fi + probe_unavailable=0 + for log in log/proactor_matrix/gha-full-suite-uring-probe/Proactor_Contract_Test.uring*.log; do + [ -e "$log" ] || continue + if grep -Eiq 'Failed to initialize uring proactor|io_uring_queue_init|Operation not permitted|EPERM' "$log"; then + probe_unavailable=1 + break + fi + done + if [ "$probe_unavailable" -eq 1 ]; then + echo "available=false" >> "$GITHUB_OUTPUT" + echo "io_uring is unavailable on this runner; running the full suite without -Config uring." + exit 0 + fi + exit "$rc" + - name: Run ACE full test suite + shell: bash + working-directory: ${{ env.ACE_ROOT }}/tests + if: matrix.full_test_suite == true + run: | + set -euo pipefail + export LD_LIBRARY_PATH="${ACE_ROOT}/lib:${ACE_ROOT}/tests:${LD_LIBRARY_PATH:-}" + test_configs=(-Config Linux -Config FIXED_BUGS_ONLY -Config GHA) + if [ "${{ steps.full_suite_uring_probe.outputs.available }}" = "true" ]; then + test_configs+=(-Config uring) + fi + set +e + perl ./run_test.pl "${test_configs[@]}" 2>&1 | tee run_test_full_suite.log + run_status=${PIPESTATUS[0]} + set -e + parse_status=0 + perl -ne 'if (/^(Error|ERROR):/ || /auto_run_tests_finished:.*Result:(?!0\b)-?\d+/) { print; $failed = 1 } END { exit($failed ? 1 : 0) }' \ + run_test_full_suite.log || parse_status=$? + if [ "$run_status" -ne 0 ]; then + echo "run_test.pl failed with exit status $run_status" + exit "$run_status" + fi + if [ "$parse_status" -ne 0 ]; then + echo "ACE full test suite reported errors or failing test results." 
+ exit "$parse_status" + fi + - name: Upload ACE full test suite logs + if: always() && matrix.full_test_suite == true + uses: actions/upload-artifact@v4 + with: + name: linux-ace-full-test-suite + path: | + ${{ env.ACE_ROOT }}/tests/run_test_full_suite.log + ${{ env.ACE_ROOT }}/tests/log + if-no-files-found: ignore - name: Perform CodeQL Analysis uses: github/codeql-action/analyze@v3 if: matrix.feature == 'CodeQL' + + proactor-posix-smoke: + runs-on: ubuntu-24.04 + timeout-minutes: 25 + name: ubuntu-24.04 gcc-13 Proactor POSIX smoke + env: + ACE_ROOT: ${{ github.workspace }}/ACE + MPC_ROOT: ${{ github.workspace }}/MPC + CC: gcc-13 + CXX: g++-13 + steps: + - name: checkout ACE/TAO + uses: actions/checkout@v6 + - name: checkout MPC + uses: actions/checkout@v6 + with: + repository: DOCGroup/MPC + path: ${{ env.MPC_ROOT }} + - name: Install apt packages + shell: bash + run: | + set -euxo pipefail + sudo apt-get update + sudo apt-get install -y "$CXX" + - name: Configure ACE + shell: bash + run: | + set -euxo pipefail + cat > "$ACE_ROOT/ace/config.h" <<'EOF' + #include "ace/config-linux.h" + EOF + cat > "$ACE_ROOT/include/makeinclude/platform_macros.GNU" <<'EOF' + ipv6=1 + include $(ACE_ROOT)/include/makeinclude/platform_linux.GNU + EOF + cat > "$ACE_ROOT/bin/MakeProjectCreator/config/default.features" <<'EOF' + ipv6=1 + versioned_namespace=1 + EOF + - name: Generate ACE makefiles + shell: bash + run: | + set -euxo pipefail + perl "$ACE_ROOT/bin/mwc.pl" -type gnuace "$ACE_ROOT/ace/ace.mwc" -workers 4 + perl "$ACE_ROOT/bin/mwc.pl" -type gnuace "$ACE_ROOT/tests/tests.mwc" -workers 4 + - name: Build Proactor smoke targets + shell: bash + run: | + set -euxo pipefail + proactor_targets=( + Proactor_Contract_Test + Proactor_File_Test + Proactor_Stress_Test + Proactor_Test + Proactor_Test_IPV6 + Proactor_Timer_Test + Proactor_UDP_Test + ) + make -j4 -C "$ACE_ROOT/ace" depend + make -j4 -C "$ACE_ROOT/ace" ACE + make -j4 -C "$ACE_ROOT/tests" depend + make -j4 -C 
"$ACE_ROOT/tests" "${proactor_targets[@]}" + - name: Run Proactor POSIX smoke tests + shell: bash + working-directory: ${{ env.ACE_ROOT }}/tests + run: | + set -euxo pipefail + BASE_PORT=23000 RUN_ID=gha-posix-contract INCLUDE_DEFAULT=1 perl ./run_proactor_correctness_matrix.pl \ + --test Proactor_Contract_Test \ + --backend default \ + --backend aiocb \ + --backend sig \ + --backend cb + BASE_PORT=24000 RUN_ID=gha-posix-default INCLUDE_DEFAULT=1 perl ./run_proactor_correctness_matrix.pl \ + --backend default \ + --test Proactor_File_Test \ + --test Proactor_Stress_Test \ + --test Proactor_Test \ + --test Proactor_Test_IPV6 \ + --test Proactor_Timer_Test \ + --test Proactor_UDP_Test + - name: Upload Proactor POSIX smoke logs + if: always() + uses: actions/upload-artifact@v4 + with: + name: linux-proactor-posix-smoke + path: ${{ env.ACE_ROOT }}/tests/log/proactor_matrix/gha-posix* + if-no-files-found: ignore + + proactor-uring-smoke: + runs-on: ubuntu-24.04 + timeout-minutes: 25 + name: ubuntu-24.04 gcc-13 Proactor io_uring smoke + env: + ACE_ROOT: ${{ github.workspace }}/ACE + MPC_ROOT: ${{ github.workspace }}/MPC + CC: gcc-13 + CXX: g++-13 + steps: + - name: checkout ACE/TAO + uses: actions/checkout@v6 + - name: checkout MPC + uses: actions/checkout@v6 + with: + repository: DOCGroup/MPC + path: ${{ env.MPC_ROOT }} + - name: Install apt packages + shell: bash + run: | + set -euxo pipefail + sudo apt-get update + sudo apt-get install -y "$CXX" liburing-dev + - name: Configure ACE for io_uring + shell: bash + run: | + set -euxo pipefail + cat > "$ACE_ROOT/ace/config.h" <<'EOF' + #include "ace/config-linux.h" + EOF + cat > "$ACE_ROOT/include/makeinclude/platform_macros.GNU" <<'EOF' + ipv6=1 + uring=1 + include $(ACE_ROOT)/include/makeinclude/platform_linux.GNU + EOF + cat > "$ACE_ROOT/bin/MakeProjectCreator/config/default.features" <<'EOF' + ipv6=1 + versioned_namespace=1 + EOF + - name: Generate ACE makefiles + shell: bash + run: | + set -euxo pipefail + perl 
"$ACE_ROOT/bin/mwc.pl" -type gnuace "$ACE_ROOT/ace/ace.mwc" -workers 4 + perl "$ACE_ROOT/bin/mwc.pl" -type gnuace "$ACE_ROOT/tests/tests.mwc" -workers 4 + - name: Build Proactor io_uring smoke targets + shell: bash + run: | + set -euxo pipefail + proactor_targets=( + Proactor_Contract_Test + Proactor_File_Test + Proactor_Scatter_Gather_Test + Proactor_Stress_Test + Proactor_Test + Proactor_Test_IPV6 + Proactor_Timer_Test + Proactor_UDP_Test + ) + make -j4 -C "$ACE_ROOT/ace" depend + make -j4 -C "$ACE_ROOT/ace" ACE + make -j4 -C "$ACE_ROOT/tests" depend + make -j4 -C "$ACE_ROOT/tests" "${proactor_targets[@]}" + - name: Probe io_uring availability + id: uring_probe + shell: bash + working-directory: ${{ env.ACE_ROOT }}/tests + run: | + set -euo pipefail + set +e + BASE_PORT=25000 RUN_ID=gha-uring-probe perl ./run_proactor_correctness_matrix.pl \ + --test Proactor_Contract_Test \ + --backend uring + rc=$? + set -e + if [ "$rc" -eq 0 ]; then + echo "available=true" >> "$GITHUB_OUTPUT" + exit 0 + fi + probe_unavailable=0 + for log in log/proactor_matrix/gha-uring-probe/Proactor_Contract_Test.uring*.log; do + [ -e "$log" ] || continue + if grep -Eiq 'Failed to initialize uring proactor|io_uring_queue_init|Operation not permitted|EPERM' "$log"; then + probe_unavailable=1 + break + fi + done + if [ "$probe_unavailable" -eq 1 ]; then + echo "available=false" >> "$GITHUB_OUTPUT" + echo "io_uring is unavailable on this runner; skipping io_uring smoke." 
+ exit 0 + fi + exit "$rc" + - name: Run Proactor io_uring smoke tests + if: steps.uring_probe.outputs.available == 'true' + shell: bash + working-directory: ${{ env.ACE_ROOT }}/tests + run: | + set -euxo pipefail + BASE_PORT=26000 RUN_ID=gha-uring-smoke perl ./run_proactor_correctness_matrix.pl \ + --backend uring \ + --test Proactor_Contract_Test \ + --test Proactor_File_Test \ + --test Proactor_Scatter_Gather_Test \ + --test Proactor_Stress_Test \ + --test Proactor_Test \ + --test Proactor_Test_IPV6 \ + --test Proactor_Timer_Test \ + --test Proactor_UDP_Test + - name: Upload Proactor io_uring smoke logs + if: always() + uses: actions/upload-artifact@v4 + with: + name: linux-proactor-uring-smoke + path: ${{ env.ACE_ROOT }}/tests/log/proactor_matrix/gha-uring* + if-no-files-found: ignore diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index b69c0ebc74fe5..eabc3d62619ef 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -97,7 +97,7 @@ jobs: - name: create $ACE_ROOT/bin/MakeProjectCreator/config/default.features run: | echo "ipv6=1" | out-file -encoding ASCII ${env:ACE_ROOT}/bin/MakeProjectCreator/config/default.features - echo "xerces3=1" | out-file -encoding ASCII ${env:ACE_ROOT}/bin/MakeProjectCreator/config/default.features + echo "xerces3=1" | out-file -append -encoding ASCII ${env:ACE_ROOT}/bin/MakeProjectCreator/config/default.features echo "ssl=1" | out-file -append -encoding ASCII ${env:ACE_ROOT}/bin/MakeProjectCreator/config/default.features echo "openssl11=1" | out-file -append -encoding ASCII ${env:ACE_ROOT}/bin/MakeProjectCreator/config/default.features echo "versioned_namespace=1" | out-file -append -encoding ASCII ${env:ACE_ROOT}/bin/MakeProjectCreator/config/default.features @@ -127,3 +127,72 @@ jobs: run: msbuild -maxcpucount -p:Platform=${{ matrix.BuildPlatform }} -p:Configuration=${{ matrix.BuildConfiguration }} ACE/tests/tests.sln - name: Build solution TAO/tests/IDL_Test/IDL_Test.sln run: 
msbuild -maxcpucount -p:Platform=${{ matrix.BuildPlatform }} -p:Configuration=${{ matrix.BuildConfiguration }} TAO/tests/IDL_Test/IDL_Test.sln + - name: Run ACE full test suite + if: matrix.name == 'VS2022Debug64Cxx20' + working-directory: ${{ env.ACE_ROOT }}/tests + shell: pwsh + run: | + $env:PATH = "${env:ACE_ROOT}\lib;${env:ACE_ROOT}\tests;${env:ACE_ROOT}\lib\${{ matrix.BuildPlatform }}\${{ matrix.BuildConfiguration }};${env:ACE_ROOT}\tests\${{ matrix.BuildPlatform }}\${{ matrix.BuildConfiguration }};${{ github.workspace }}\vcpkg_installed\${{ matrix.vcpkgarch }}\debug\bin;${{ github.workspace }}\vcpkg_installed\${{ matrix.vcpkgarch }}\bin;$env:PATH" + & perl .\run_test.pl -Config Win32 -Config MSVC -Config FIXED_BUGS_ONLY -Config GHA 2>&1 | + Tee-Object -FilePath run_test_full_suite.log + $runExit = $LASTEXITCODE + $failures = Select-String -Path run_test_full_suite.log -Pattern '^(Error|ERROR):', 'auto_run_tests_finished:.*Result:(?!0\b)-?\d+' + if ($runExit -ne 0) { + Write-Error "run_test.pl failed with exit status $runExit" + exit $runExit + } + if ($failures) { + $failures | ForEach-Object { Write-Output $_.Line } + Write-Error "ACE full test suite reported errors or failing test results." 
+ exit 1 + } + - name: Upload ACE full test suite logs + if: always() && matrix.name == 'VS2022Debug64Cxx20' + uses: actions/upload-artifact@v4 + with: + name: windows-ace-full-test-suite + path: | + ${{ env.ACE_ROOT }}/tests/run_test_full_suite.log + ${{ env.ACE_ROOT }}/tests/log + if-no-files-found: ignore + - name: Run Proactor Win32 correctness smoke tests + if: matrix.name == 'VS2022Debug64Cxx17' + working-directory: ${{ env.ACE_ROOT }}/tests + shell: pwsh + run: | + $env:PATH = "${env:ACE_ROOT}\lib;${env:ACE_ROOT}\lib\${{ matrix.BuildPlatform }}\${{ matrix.BuildConfiguration }};${env:ACE_ROOT}\tests\${{ matrix.BuildPlatform }}\${{ matrix.BuildConfiguration }};$env:PATH" + $env:RUN_ID = "gha-win32-correctness" + $env:BASE_PORT = "23000" + perl .\run_proactor_correctness_matrix.pl --backend win32 ` + --test Proactor_Contract_Test ` + --test Proactor_File_Test ` + --test Proactor_Network_Performance_Test:tcp ` + --test Proactor_Network_Performance_Test:udp ` + --test Proactor_Scatter_Gather_Test ` + --test Proactor_Stress_Test ` + --test Proactor_Test ` + --test Proactor_Test_IPV6 ` + --test Proactor_Timer_Test ` + --test Proactor_UDP_Test + - name: Run Proactor Win32 performance smoke tests + if: matrix.name == 'VS2022Debug64Cxx17' + working-directory: ${{ env.ACE_ROOT }}/tests + shell: pwsh + run: | + $env:PATH = "${env:ACE_ROOT}\lib;${env:ACE_ROOT}\lib\${{ matrix.BuildPlatform }}\${{ matrix.BuildConfiguration }};${env:ACE_ROOT}\tests\${{ matrix.BuildPlatform }}\${{ matrix.BuildConfiguration }};$env:PATH" + $env:RUN_ID = "gha-win32-performance" + $env:BASE_PORT = "24000" + perl .\run_proactor_performance_matrix.pl --backend win32 ` + --scenario stress_recursive ` + --scenario tcp_1x1_light_v4 ` + --scenario tcp_1x16_moderate_mt_v4 ` + --scenario udp_1x16_moderate_mt_buf4m_v4 + - name: Upload Proactor Win32 smoke logs + if: always() && matrix.name == 'VS2022Debug64Cxx17' + uses: actions/upload-artifact@v4 + with: + name: windows-proactor-win32-smoke + path: | + 
${{ env.ACE_ROOT }}/tests/log/proactor_matrix/gha-win32-correctness + ${{ env.ACE_ROOT }}/tests/log/proactor_performance/gha-win32-performance + if-no-files-found: ignore diff --git a/ACE/NEWS b/ACE/NEWS index f7c69ada88ebb..15c718a55ed96 100644 --- a/ACE/NEWS +++ b/ACE/NEWS @@ -1,6 +1,9 @@ USER VISIBLE CHANGES BETWEEN ACE-6.5.24 and ACE-6.5.25 ====================================================== +. Added an optional Linux io_uring based ACE Proactor backend and expanded + Proactor test coverage across the available backend implementations. + USER VISIBLE CHANGES BETWEEN ACE-6.5.23 and ACE-6.5.24 ====================================================== diff --git a/ACE/ace/Asynch_IO.cpp b/ACE/ace/Asynch_IO.cpp index f50cdc33d310f..aff06b720604f 100644 --- a/ACE/ace/Asynch_IO.cpp +++ b/ACE/ace/Asynch_IO.cpp @@ -200,7 +200,6 @@ ACE_Asynch_Read_Stream::read (ACE_Message_Block &message_block, signal_number); } -#if defined (ACE_HAS_WIN32_OVERLAPPED_IO) int ACE_Asynch_Read_Stream::readv (ACE_Message_Block &message_block, size_t bytes_to_read, @@ -219,7 +218,6 @@ ACE_Asynch_Read_Stream::readv (ACE_Message_Block &message_block, priority, signal_number); } -#endif /* ACE_HAS_WIN32_OVERLAPPED_IO */ ACE_Asynch_Operation_Impl * ACE_Asynch_Read_Stream::implementation (void) const @@ -318,7 +316,6 @@ ACE_Asynch_Write_Stream::write (ACE_Message_Block &message_block, signal_number); } -#if defined (ACE_HAS_WIN32_OVERLAPPED_IO) int ACE_Asynch_Write_Stream::writev (ACE_Message_Block &message_block, size_t bytes_to_write, @@ -337,7 +334,6 @@ ACE_Asynch_Write_Stream::writev (ACE_Message_Block &message_block, priority, signal_number); } -#endif /* ACE_HAS_WIN32_OVERLAPPED_IO */ ACE_Asynch_Operation_Impl * ACE_Asynch_Write_Stream::implementation (void) const @@ -440,7 +436,6 @@ ACE_Asynch_Read_File::read (ACE_Message_Block &message_block, signal_number); } -#if (defined (ACE_WIN32) && !defined (ACE_HAS_WINCE)) int ACE_Asynch_Read_File::readv (ACE_Message_Block &message_block, size_t 
bytes_to_read, @@ -463,7 +458,6 @@ ACE_Asynch_Read_File::readv (ACE_Message_Block &message_block, priority, signal_number); } -#endif /* (defined (ACE_WIN32) && !defined (ACE_HAS_WINCE)) */ ACE_Asynch_Operation_Impl * ACE_Asynch_Read_File::implementation (void) const @@ -548,7 +542,6 @@ ACE_Asynch_Write_File::write (ACE_Message_Block &message_block, signal_number); } -#if (defined (ACE_WIN32) && !defined (ACE_HAS_WINCE)) int ACE_Asynch_Write_File::writev (ACE_Message_Block &message_block, size_t bytes_to_write, @@ -571,7 +564,6 @@ ACE_Asynch_Write_File::writev (ACE_Message_Block &message_block, priority, signal_number); } -#endif /* (defined (ACE_WIN32) && !defined (ACE_HAS_WINCE)) */ ACE_Asynch_Operation_Impl * ACE_Asynch_Write_File::implementation (void) const diff --git a/ACE/ace/Asynch_IO.h b/ACE/ace/Asynch_IO.h index 60cc3f172769f..b01c4b303d5f0 100644 --- a/ACE/ace/Asynch_IO.h +++ b/ACE/ace/Asynch_IO.h @@ -342,7 +342,6 @@ class ACE_Export ACE_Asynch_Read_Stream : public ACE_Asynch_Operation int priority = 0, int signal_number = ACE_SIGRTMIN); -#if defined (ACE_HAS_WIN32_OVERLAPPED_IO) /** * Same as above but with scatter support, through chaining of composite * message blocks using the continuation field. @@ -352,7 +351,6 @@ class ACE_Export ACE_Asynch_Read_Stream : public ACE_Asynch_Operation const void *act = 0, int priority = 0, int signal_number = ACE_SIGRTMIN); -#endif /* defined (ACE_HAS_WIN32_OVERLAPPED_IO) */ /// Return the underlying implementation class. // (this should be protected...) @@ -380,6 +378,7 @@ class ACE_Export ACE_Asynch_Read_Stream : public ACE_Asynch_Operation /// class. 
friend class ACE_POSIX_Asynch_Read_Stream_Result; friend class ACE_WIN32_Asynch_Read_Stream_Result; + friend class ACE_Uring_Asynch_Read_Stream_Result; public: /// The number of bytes which were requested at the start of the @@ -496,7 +495,6 @@ class ACE_Export ACE_Asynch_Write_Stream : public ACE_Asynch_Operation int priority = 0, int signal_number = ACE_SIGRTMIN); -#if defined (ACE_HAS_WIN32_OVERLAPPED_IO) /** * Same as above but with gather support, through chaining of composite * message blocks using the continuation field. @@ -506,7 +504,6 @@ class ACE_Export ACE_Asynch_Write_Stream : public ACE_Asynch_Operation const void *act = 0, int priority = 0, int signal_number = ACE_SIGRTMIN); -#endif /* defined (ACE_HAS_WIN32_OVERLAPPED_IO) */ /// Return the underlying implementation class. /// @todo (this should be protected...) @@ -534,6 +531,7 @@ class ACE_Export ACE_Asynch_Write_Stream : public ACE_Asynch_Operation /// class. friend class ACE_POSIX_Asynch_Write_Stream_Result; friend class ACE_WIN32_Asynch_Write_Stream_Result; + friend class ACE_Uring_Asynch_Write_Stream_Result; public: /// The number of bytes which were requested at the start of the @@ -639,7 +637,6 @@ class ACE_Export ACE_Asynch_Read_File : public ACE_Asynch_Read_Stream int priority = 0, int signal_number = ACE_SIGRTMIN); -#if (defined (ACE_WIN32) && !defined (ACE_HAS_WINCE)) /** * Same as above but with scatter support, through chaining of composite * message blocks using the continuation field. @@ -653,7 +650,6 @@ class ACE_Export ACE_Asynch_Read_File : public ACE_Asynch_Read_Stream const void *act = 0, int priority = 0, int signal_number = ACE_SIGRTMIN); -#endif /* (defined (ACE_WIN32) && !defined (ACE_HAS_WINCE)) */ /// Return the underlying implementation class. // (this should be protected...) @@ -688,6 +684,7 @@ class ACE_Export ACE_Asynch_Read_File : public ACE_Asynch_Read_Stream /// class. 
friend class ACE_POSIX_Asynch_Read_File_Result; friend class ACE_WIN32_Asynch_Read_File_Result; + friend class ACE_Uring_Asynch_Read_File_Result; public: /// Get the implementation class. @@ -774,7 +771,6 @@ class ACE_Export ACE_Asynch_Write_File : public ACE_Asynch_Write_Stream int priority = 0, int signal_number = ACE_SIGRTMIN); -#if (defined (ACE_WIN32) && !defined (ACE_HAS_WINCE)) /** * Same as above but with gather support, through chaining of composite * message blocks using the continuation field. @@ -788,7 +784,6 @@ class ACE_Export ACE_Asynch_Write_File : public ACE_Asynch_Write_Stream const void *act = 0, int priority = 0, int signal_number = ACE_SIGRTMIN); -#endif /* (defined (ACE_WIN32) && !defined (ACE_HAS_WINCE)) */ /// Return the underlying implementation class. // (this should be protected...) @@ -822,6 +817,7 @@ class ACE_Export ACE_Asynch_Write_File : public ACE_Asynch_Write_Stream /// class. friend class ACE_POSIX_Asynch_Write_File_Result; friend class ACE_WIN32_Asynch_Write_File_Result; + friend class ACE_Uring_Asynch_Write_File_Result; public: /// Get the implementation class. @@ -952,6 +948,7 @@ class ACE_Export ACE_Asynch_Accept : public ACE_Asynch_Operation /// class. friend class ACE_POSIX_Asynch_Accept_Result; friend class ACE_WIN32_Asynch_Accept_Result; + friend class ACE_Uring_Asynch_Accept_Result; public: /// The number of bytes which were requested at the start of the @@ -1056,6 +1053,7 @@ class ACE_Export ACE_Asynch_Connect : public ACE_Asynch_Operation /// class. friend class ACE_POSIX_Asynch_Connect_Result; friend class ACE_WIN32_Asynch_Connect_Result; + friend class ACE_Uring_Asynch_Connect_Result; public: @@ -1180,6 +1178,7 @@ class ACE_Export ACE_Asynch_Transmit_File : public ACE_Asynch_Operation /// class. friend class ACE_POSIX_Asynch_Transmit_File_Result; friend class ACE_WIN32_Asynch_Transmit_File_Result; + friend class ACE_Uring_Asynch_Transmit_File_Result; public: /// Socket used for transmitting the file. 
@@ -1390,6 +1389,7 @@ class ACE_Export ACE_Asynch_Read_Dgram : public ACE_Asynch_Operation /// class. friend class ACE_POSIX_Asynch_Read_Dgram_Result; friend class ACE_WIN32_Asynch_Read_Dgram_Result; + friend class ACE_Uring_Asynch_Read_Dgram_Result; public: /// The number of bytes which were requested at the start of the @@ -1523,6 +1523,7 @@ class ACE_Export ACE_Asynch_Write_Dgram : public ACE_Asynch_Operation /// class. friend class ACE_POSIX_Asynch_Write_Dgram_Result; friend class ACE_WIN32_Asynch_Write_Dgram_Result; + friend class ACE_Uring_Asynch_Write_Dgram_Result; public: /// The number of bytes which were requested at the start of the diff --git a/ACE/ace/Asynch_IO_Impl.cpp b/ACE/ace/Asynch_IO_Impl.cpp index 1f25a1ee7b918..b711927a6ffbe 100644 --- a/ACE/ace/Asynch_IO_Impl.cpp +++ b/ACE/ace/Asynch_IO_Impl.cpp @@ -4,6 +4,8 @@ // This only works on Win32 platforms and on Unix platforms supporting // aio calls. +#include "ace/os_include/os_errno.h" + #if !defined (__ACE_INLINE__) #include "ace/Asynch_IO_Impl.inl" #endif /* __ACE_INLINE__ */ @@ -22,6 +24,16 @@ ACE_Asynch_Read_Stream_Impl::~ACE_Asynch_Read_Stream_Impl (void) { } +int +ACE_Asynch_Read_Stream_Impl::readv (ACE_Message_Block &/*message_block*/, + size_t /*bytes_to_read*/, + const void */*act*/, + int /*priority*/, + int /*signal_number*/) +{ + ACE_NOTSUP_RETURN (-1); +} + ACE_Asynch_Read_Stream_Result_Impl::~ACE_Asynch_Read_Stream_Result_Impl (void) { } @@ -30,6 +42,16 @@ ACE_Asynch_Write_Stream_Impl::~ACE_Asynch_Write_Stream_Impl (void) { } +int +ACE_Asynch_Write_Stream_Impl::writev (ACE_Message_Block &/*message_block*/, + size_t /*bytes_to_write*/, + const void */*act*/, + int /*priority*/, + int /*signal_number*/) +{ + ACE_NOTSUP_RETURN (-1); +} + ACE_Asynch_Write_Stream_Result_Impl::~ACE_Asynch_Write_Stream_Result_Impl (void) { } @@ -38,10 +60,66 @@ ACE_Asynch_Read_File_Impl::~ACE_Asynch_Read_File_Impl (void) { } +int +ACE_Asynch_Read_File_Impl::readv (ACE_Message_Block &/*message_block*/, + 
size_t /*bytes_to_read*/, + u_long /*offset*/, + u_long /*offset_high*/, + const void */*act*/, + int /*priority*/, + int /*signal_number*/) +{ + ACE_NOTSUP_RETURN (-1); +} + +int +ACE_Asynch_Read_File_Impl::readv (ACE_Message_Block &message_block, + size_t bytes_to_read, + const void *act, + int priority, + int signal_number) +{ + return this->readv (message_block, + bytes_to_read, + 0, + 0, + act, + priority, + signal_number); +} + ACE_Asynch_Write_File_Impl::~ACE_Asynch_Write_File_Impl (void) { } +int +ACE_Asynch_Write_File_Impl::writev (ACE_Message_Block &/*message_block*/, + size_t /*bytes_to_write*/, + u_long /*offset*/, + u_long /*offset_high*/, + const void */*act*/, + int /*priority*/, + int /*signal_number*/) +{ + ACE_NOTSUP_RETURN (-1); +} + +int +ACE_Asynch_Write_File_Impl::writev (ACE_Message_Block &message_block, + size_t bytes_to_write, + const void *act, + int priority, + int signal_number) +{ + return this->writev (message_block, + bytes_to_write, + 0, + 0, + act, + priority, + signal_number); +} + ACE_Asynch_Read_File_Result_Impl::~ACE_Asynch_Read_File_Result_Impl (void) { } diff --git a/ACE/ace/Asynch_IO_Impl.h b/ACE/ace/Asynch_IO_Impl.h index a4ec996df1918..149107fd66b30 100644 --- a/ACE/ace/Asynch_IO_Impl.h +++ b/ACE/ace/Asynch_IO_Impl.h @@ -160,7 +160,6 @@ class ACE_Export ACE_Asynch_Read_Stream_Impl : public virtual ACE_Asynch_Operati int priority, int signal_number) = 0; -#if (defined (ACE_WIN32) && !defined (ACE_HAS_WINCE)) /** * Same as above but with scatter support, through chaining of composite * message blocks using the continuation field. @@ -169,8 +168,7 @@ class ACE_Export ACE_Asynch_Read_Stream_Impl : public virtual ACE_Asynch_Operati size_t bytes_to_read, const void *act, int priority, - int signal_number) = 0; -#endif /* (defined (ACE_WIN32) && !defined (ACE_HAS_WINCE)) */ + int signal_number); protected: /// Do-nothing constructor. 
@@ -224,7 +222,6 @@ class ACE_Export ACE_Asynch_Write_Stream_Impl : public virtual ACE_Asynch_Operat int priority, int signal_number) = 0; -#if (defined (ACE_WIN32) && !defined (ACE_HAS_WINCE)) /** * Same as above but with gather support, through chaining of composite * message blocks using the continuation field. @@ -233,8 +230,7 @@ class ACE_Export ACE_Asynch_Write_Stream_Impl : public virtual ACE_Asynch_Operat size_t bytes_to_write, const void *act, int priority, - int signal_number) = 0; -#endif /* (defined (ACE_WIN32) && !defined (ACE_HAS_WINCE)) */ + int signal_number); protected: /// Do-nothing constructor. @@ -293,7 +289,6 @@ class ACE_Export ACE_Asynch_Read_File_Impl : public virtual ACE_Asynch_Read_Stre int priority, int signal_number) = 0; -#if (defined (ACE_WIN32) && !defined (ACE_HAS_WINCE)) /** * Same as above but with scatter support, through chaining of composite * message blocks using the continuation field. @@ -306,8 +301,7 @@ class ACE_Export ACE_Asynch_Read_File_Impl : public virtual ACE_Asynch_Read_Stre u_long offset_high, const void *act, int priority, - int signal_number) = 0; -#endif /* (defined (ACE_WIN32) && !defined (ACE_HAS_WINCE)) */ + int signal_number); /// This starts off an asynchronous read. Upto @a bytes_to_read will /// be read and stored in the @a message_block. @@ -317,7 +311,6 @@ class ACE_Export ACE_Asynch_Read_File_Impl : public virtual ACE_Asynch_Read_Stre int priority, int signal_number) = 0; -#if (defined (ACE_WIN32) && !defined (ACE_HAS_WINCE)) /** * Same as above but with scatter support, through chaining of composite * message blocks using the continuation field. @@ -326,8 +319,7 @@ class ACE_Export ACE_Asynch_Read_File_Impl : public virtual ACE_Asynch_Read_Stre size_t bytes_to_read, const void *act, int priority, - int signal_number) = 0; -#endif /* (defined (ACE_WIN32) && !defined (ACE_HAS_WINCE)) */ + int signal_number); protected: /// Do-nothing constructor. 
@@ -376,7 +368,6 @@ class ACE_Export ACE_Asynch_Write_File_Impl : public virtual ACE_Asynch_Write_St int priority, int signal_number) = 0; -#if (defined (ACE_WIN32) && !defined (ACE_HAS_WINCE)) /** * Same as above but with gather support, through chaining of composite * message blocks using the continuation field. @@ -389,8 +380,7 @@ class ACE_Export ACE_Asynch_Write_File_Impl : public virtual ACE_Asynch_Write_St u_long offset_high, const void *act, int priority, - int signal_number) = 0; -#endif /* (defined (ACE_WIN32) && !defined (ACE_HAS_WINCE)) */ + int signal_number); /// This starts off an asynchronous write. Upto @a bytes_to_write /// will be written from the @a message_block. @@ -400,7 +390,6 @@ class ACE_Export ACE_Asynch_Write_File_Impl : public virtual ACE_Asynch_Write_St int priority, int signal_number) = 0; -#if (defined (ACE_WIN32) && !defined (ACE_HAS_WINCE)) /** * Same as above but with gather support, through chaining of composite * message blocks using the continuation field. @@ -409,8 +398,7 @@ class ACE_Export ACE_Asynch_Write_File_Impl : public virtual ACE_Asynch_Write_St size_t bytes_to_write, const void *act, int priority, - int signal_number) = 0; -#endif /* (defined (ACE_WIN32) && !defined (ACE_HAS_WINCE)) */ + int signal_number); protected: /// Do-nothing constructor. 
diff --git a/ACE/ace/Asynch_Pseudo_Task.cpp b/ACE/ace/Asynch_Pseudo_Task.cpp index 0b6a14b60e549..841a996c487bb 100644 --- a/ACE/ace/Asynch_Pseudo_Task.cpp +++ b/ACE/ace/Asynch_Pseudo_Task.cpp @@ -7,7 +7,8 @@ ACE_BEGIN_VERSIONED_NAMESPACE_DECL ACE_Asynch_Pseudo_Task::ACE_Asynch_Pseudo_Task () : select_reactor_ (), // should be initialized before reactor_ - reactor_ (&select_reactor_, 0) // don't delete implementation + reactor_ (&select_reactor_, 0), // don't delete implementation + started_ (false) { } @@ -19,19 +20,26 @@ ACE_Asynch_Pseudo_Task::~ACE_Asynch_Pseudo_Task () int ACE_Asynch_Pseudo_Task::start (void) { + if (this->started_.value ()) + return 0; + if (this->reactor_.initialized () == 0) ACELIB_ERROR_RETURN ((LM_ERROR, ACE_TEXT ("%N:%l:%p\n"), ACE_TEXT ("start reactor is not initialized")), -1); - return this->activate () == -1 ? -1 : 0; // If started, return 0 + if (this->activate () == -1) + return -1; + + this->started_ = true; + return 0; } int ACE_Asynch_Pseudo_Task::stop (void) { - if (this->thr_count () == 0) // already stopped + if (!this->started_.value ()) // already stopped return 0; if (this->reactor_.end_reactor_event_loop () == -1) @@ -39,6 +47,7 @@ ACE_Asynch_Pseudo_Task::stop (void) this->wait (); this->reactor_.close (); + this->started_ = false; return 0; } diff --git a/ACE/ace/Asynch_Pseudo_Task.h b/ACE/ace/Asynch_Pseudo_Task.h index 4337361fe4440..743b7104f1d45 100644 --- a/ACE/ace/Asynch_Pseudo_Task.h +++ b/ACE/ace/Asynch_Pseudo_Task.h @@ -22,6 +22,7 @@ #include "ace/Reactor.h" #include "ace/Select_Reactor.h" #include "ace/Task.h" +#include "ace/Atomic_Op.h" ACE_BEGIN_VERSIONED_NAMESPACE_DECL @@ -61,8 +62,10 @@ class ACE_Export ACE_Asynch_Pseudo_Task : public ACE_Task ACE_Select_Reactor select_reactor_; ACE_Reactor reactor_; + ACE_Atomic_Op started_; }; + ACE_END_VERSIONED_NAMESPACE_DECL #include /**/ "ace/post.h" diff --git a/ACE/ace/Framework_Component.cpp b/ACE/ace/Framework_Component.cpp index 513d378b94356..98f2089d31d37 100644 
--- a/ACE/ace/Framework_Component.cpp +++ b/ACE/ace/Framework_Component.cpp @@ -142,7 +142,7 @@ ACE_Framework_Repository::register_component (ACE_Framework_Component *fc) fc->this_ == this->component_vector_[i]->this_) { ACELIB_ERROR_RETURN ((LM_ERROR, - "AFR::register_component: error, compenent already registered\n"), + "AFR::register_component: error, component already registered\n"), -1); } diff --git a/ACE/ace/INET_Addr.h b/ACE/ace/INET_Addr.h index 29c87f8cf5551..a8cc75447d77f 100644 --- a/ACE/ace/INET_Addr.h +++ b/ACE/ace/INET_Addr.h @@ -398,7 +398,7 @@ class ACE_Export ACE_INET_Addr : public ACE_Addr // the underlying internet address structure. void *ip_addr_pointer (void) const; int ip_addr_size (void) const; - int determine_type (void) const; + static int determine_type (void); /// Initialize underlying inet_addr_ to default values void reset_i (void); diff --git a/ACE/ace/INET_Addr.inl b/ACE/ace/INET_Addr.inl index 9f2f83d9ad77d..01f2a10187b61 100644 --- a/ACE/ace/INET_Addr.inl +++ b/ACE/ace/INET_Addr.inl @@ -32,7 +32,7 @@ ACE_INET_Addr::reset_i (void) } ACE_INLINE int -ACE_INET_Addr::determine_type (void) const +ACE_INET_Addr::determine_type (void) { #if defined (ACE_HAS_IPV6) # if defined (ACE_USES_IPV4_IPV6_MIGRATION) diff --git a/ACE/ace/POSIX_Asynch_IO.cpp b/ACE/ace/POSIX_Asynch_IO.cpp index 2b29515b934fa..965919bcffd63 100644 --- a/ACE/ace/POSIX_Asynch_IO.cpp +++ b/ACE/ace/POSIX_Asynch_IO.cpp @@ -94,6 +94,17 @@ ACE_POSIX_Asynch_Result::post_completion (ACE_Proactor_Impl *proactor_impl) // Get to the platform specific implementation. ACE_POSIX_Proactor *posix_proactor = dynamic_cast (proactor_impl); + // Some runtimes have trouble with the direct cast from the abstract + // proactor interface to the POSIX base for callback-based proactors, + // even though the narrower AIOCB path is still valid. 
+ if (posix_proactor == 0) + { + ACE_POSIX_AIOCB_Proactor *aiocb_proactor = + dynamic_cast (proactor_impl); + if (aiocb_proactor != 0) + posix_proactor = aiocb_proactor; + } + if (posix_proactor == 0) ACELIB_ERROR_RETURN ((LM_ERROR, "Dynamic cast to POSIX Proactor failed\n"), -1); @@ -516,10 +527,10 @@ ACE_POSIX_Asynch_Read_File::read (ACE_Message_Block &message_block, int signal_number) { size_t space = message_block.space (); - if ( bytes_to_read > space ) + if (bytes_to_read > space) bytes_to_read=space; - if ( bytes_to_read == 0 ) + if (bytes_to_read == 0) ACELIB_ERROR_RETURN ((LM_ERROR, ACE_TEXT ("ACE_POSIX_Asynch_Read_File::read:") @@ -996,7 +1007,7 @@ ACE_POSIX_Asynch_Accept::close () ACE_TRACE ("ACE_POSIX_Asynch_Accept::close"); // 1. It performs cancellation of all pending requests - // 2. Removes itself from Reactor ( ACE_Asynch_Pseudo_Task) + // 2. Removes itself from Reactor (ACE_Asynch_Pseudo_Task) // 3. close the socket // // Parameter flg_notify can be @@ -1078,7 +1089,7 @@ ACE_POSIX_Asynch_Accept::handle_input (ACE_HANDLE /* fd */) ACELIB_ERROR ((LM_ERROR, ACE_TEXT("%N:%l:(%P | %t):%p\n"), ACE_TEXT("ACE_POSIX_Asynch_Accept::handle_input:") - ACE_TEXT( " dequeueing failed"))); + ACE_TEXT(" dequeueing failed"))); // Disable the handle in the reactor if no more accepts are pending. 
if (this->result_queue_.size () == 0) @@ -1345,7 +1356,7 @@ int ACE_POSIX_Asynch_Connect::post_result (ACE_POSIX_Asynch_Connect_Result * res // return code : // -1 errors before attempt to connect // 0 connect started -// 1 connect finished ( may be unsuccessfully) +// 1 connect finished (may be unsuccessfully) int ACE_POSIX_Asynch_Connect::connect_i (ACE_POSIX_Asynch_Connect_Result *result, @@ -1356,6 +1367,7 @@ ACE_POSIX_Asynch_Connect::connect_i (ACE_POSIX_Asynch_Connect_Result *result, result->set_bytes_transferred (0); ACE_HANDLE handle = result->connect_handle (); + bool created_handle = false; if (handle == ACE_INVALID_HANDLE) { @@ -1366,6 +1378,7 @@ ACE_POSIX_Asynch_Connect::connect_i (ACE_POSIX_Asynch_Connect_Result *result, 0); // save it result->connect_handle (handle); + created_handle = (handle != ACE_INVALID_HANDLE); if (handle == ACE_INVALID_HANDLE) { result->set_error (errno); @@ -1384,8 +1397,13 @@ ACE_POSIX_Asynch_Connect::connect_i (ACE_POSIX_Asynch_Connect_Result *result, SOL_SOCKET, SO_REUSEADDR, (const char*) &one, - sizeof one) == -1 ) + sizeof one) == -1) { + if (created_handle) + { + ACE_OS::closesocket (handle); + result->connect_handle (ACE_INVALID_HANDLE); + } result->set_error (errno); ACELIB_ERROR_RETURN ((LM_ERROR, @@ -1402,6 +1420,11 @@ ACE_POSIX_Asynch_Connect::connect_i (ACE_POSIX_Asynch_Connect_Result *result, if (ACE_OS::bind (handle, laddr, size) == -1) { + if (created_handle) + { + ACE_OS::closesocket (handle); + result->connect_handle (ACE_INVALID_HANDLE); + } result->set_error (errno); ACELIB_ERROR_RETURN ((LM_ERROR, @@ -1414,6 +1437,11 @@ ACE_POSIX_Asynch_Connect::connect_i (ACE_POSIX_Asynch_Connect_Result *result, // set non blocking mode if (ACE::set_flags (handle, ACE_NONBLOCK) != 0) { + if (created_handle) + { + ACE_OS::closesocket (handle); + result->connect_handle (ACE_INVALID_HANDLE); + } result->set_error (errno); ACELIB_ERROR_RETURN ((LM_ERROR, @@ -1436,6 +1464,11 @@ ACE_POSIX_Asynch_Connect::connect_i 
(ACE_POSIX_Asynch_Connect_Result *result, if (errno == EINTR) continue; + if (created_handle) + { + ACE_OS::closesocket (handle); + result->connect_handle (ACE_INVALID_HANDLE); + } result->set_error (errno); } @@ -1829,6 +1862,9 @@ ACE_POSIX_Asynch_Transmit_Handler::~ACE_POSIX_Asynch_Transmit_Handler (void) int ACE_POSIX_Asynch_Transmit_Handler::transmit (void) { + ACE_Asynch_Transmit_File::Header_And_Trailer *header_and_trailer = + this->result_->header_and_trailer (); + // No proactor is given for the 's. Because we are using the // concrete implementations of the Asynch_Operations, and we have // already given them the specific proactor, so they wont need the @@ -1852,9 +1888,15 @@ ACE_POSIX_Asynch_Transmit_Handler::transmit (void) "ACE_Asynch_Transmit_Handler:write_stream open failed\n"), -1); + // A plain file transmit may omit header/trailer altogether. + if (header_and_trailer == 0 + || header_and_trailer->header () == 0 + || header_and_trailer->header_bytes () == 0) + return this->initiate_read_file (); + // Transmit the header. - if (this->ws_.write (*this->result_->header_and_trailer ()->header (), - this->result_->header_and_trailer ()->header_bytes (), + if (this->ws_.write (*header_and_trailer->header (), + header_and_trailer->header_bytes (), reinterpret_cast (&this->header_act_), 0) == -1) ACELIB_ERROR_RETURN ((LM_ERROR, @@ -2007,12 +2049,30 @@ ACE_POSIX_Asynch_Transmit_Handler::handle_read_file (const ACE_Asynch_Read_File: int ACE_POSIX_Asynch_Transmit_Handler::initiate_read_file (void) { + ACE_Asynch_Transmit_File::Header_And_Trailer *header_and_trailer = + this->result_->header_and_trailer (); + // Is there something to read. 
if (this->file_offset_ >= this->file_size_) { + if (header_and_trailer == 0 + || header_and_trailer->trailer () == 0 + || header_and_trailer->trailer_bytes () == 0) + { + ACE_SEH_TRY + { + this->result_->complete (this->bytes_transferred_, 1, 0, 0); + } + ACE_SEH_FINALLY + { + delete this; + } + return 0; + } + // File is sent. Send the trailer. - if (this->ws_.write (*this->result_->header_and_trailer ()->trailer (), - this->result_->header_and_trailer ()->trailer_bytes (), + if (this->ws_.write (*header_and_trailer->trailer (), + header_and_trailer->trailer_bytes (), (void *)&this->trailer_act_, this->result_->priority (), this->result_->signal_number ()) == -1) diff --git a/ACE/ace/POSIX_CB_Proactor.cpp b/ACE/ace/POSIX_CB_Proactor.cpp index 9be836cc8143c..1f1ba0bc7848e 100644 --- a/ACE/ace/POSIX_CB_Proactor.cpp +++ b/ACE/ace/POSIX_CB_Proactor.cpp @@ -9,11 +9,115 @@ ACE_BEGIN_VERSIONED_NAMESPACE_DECL +ACE_POSIX_CB_Proactor::Notification_State::Notification_State (ACE_SYNCH_SEMAPHORE &sema) + : zero_pending_ (this->mutex_), + sema_ (&sema), + pending_callbacks_ (0), + ref_count_ (0) +{ +} + +void +ACE_POSIX_CB_Proactor::Notification_State::add_pending (void) +{ + ACE_GUARD (ACE_Thread_Mutex, ace_mon, this->mutex_); + ++this->ref_count_; + ++this->pending_callbacks_; +} + +void +ACE_POSIX_CB_Proactor::Notification_State::complete_one (void) +{ + this->finish_pending_i (true); +} + +void +ACE_POSIX_CB_Proactor::Notification_State::abandon_pending (void) +{ + this->finish_pending_i (false); +} + +size_t +ACE_POSIX_CB_Proactor::Notification_State::pending (void) +{ + ACE_GUARD_RETURN (ACE_Thread_Mutex, ace_mon, this->mutex_, 0); + return this->pending_callbacks_; +} + +int +ACE_POSIX_CB_Proactor::Notification_State::wait_for_pending_zero (const ACE_Time_Value *abstime) +{ + ACE_GUARD_RETURN (ACE_Thread_Mutex, ace_mon, this->mutex_, -1); + + while (this->pending_callbacks_ != 0) + if (this->zero_pending_.wait (abstime) == -1) + return -1; + + return 0; +} + +void 
+ACE_POSIX_CB_Proactor::Notification_State::add_ref (void) +{ + ACE_GUARD (ACE_Thread_Mutex, ace_mon, this->mutex_); + ++this->ref_count_; +} + +void +ACE_POSIX_CB_Proactor::Notification_State::remove_ref (void) +{ + bool destroy = false; + { + ACE_GUARD (ACE_Thread_Mutex, ace_mon, this->mutex_); + if (this->ref_count_ != 0 && --this->ref_count_ == 0) + destroy = true; + } + + if (destroy) + delete this; +} + +void +ACE_POSIX_CB_Proactor::Notification_State::detach (void) +{ + ACE_GUARD (ACE_Thread_Mutex, ace_mon, this->mutex_); + this->sema_ = 0; +} + +void +ACE_POSIX_CB_Proactor::Notification_State::finish_pending_i (bool signal_waiter) +{ + bool destroy = false; + { + ACE_GUARD (ACE_Thread_Mutex, ace_mon, this->mutex_); + + if (signal_waiter && this->sema_ != 0) + this->sema_->release (); + + if (this->pending_callbacks_ != 0) + { + --this->pending_callbacks_; + if (this->pending_callbacks_ == 0) + this->zero_pending_.broadcast (); + } + + if (this->ref_count_ != 0 && --this->ref_count_ == 0) + destroy = true; + } + + if (destroy) + delete this; +} + ACE_POSIX_CB_Proactor::ACE_POSIX_CB_Proactor (size_t max_aio_operations) : ACE_POSIX_AIOCB_Proactor (max_aio_operations, ACE_POSIX_Proactor::PROACTOR_CB), - sema_ ((unsigned int) 0) + sema_ ((unsigned int) 0), + notification_state_ (0) { + ACE_NEW (this->notification_state_, Notification_State (this->sema_)); + this->notification_state_->add_ref (); + // we should start pseudo-asynchronous accept task // one per all future acceptors @@ -34,9 +138,10 @@ ACE_POSIX_CB_Proactor::get_impl_type (void) void ACE_POSIX_CB_Proactor::aio_completion_func (sigval cb_data) { - ACE_POSIX_CB_Proactor * impl = static_cast (cb_data.sival_ptr); - if ( impl != 0 ) - impl->notify_completion (0); + Notification_State *state = + static_cast (cb_data.sival_ptr); + if (state != 0) + state->complete_one (); } #if defined (ACE_HAS_SIG_C_FUNC) @@ -47,6 +152,27 @@ ACE_POSIX_CB_Proactor_aio_completion (sigval cb_data) } #endif /* 
ACE_HAS_SIG_C_FUNC */ +int +ACE_POSIX_CB_Proactor::close (void) +{ + int const result = ACE_POSIX_AIOCB_Proactor::close (); + + Notification_State *state = this->notification_state_; + if (state != 0) + { + this->notification_state_ = 0; + + ACE_Time_Value const settle_timeout (0, 500000); + ACE_Time_Value const deadline = + ACE_OS::gettimeofday () + settle_timeout; + (void) state->wait_for_pending_zero (&deadline); + state->detach (); + state->remove_ref (); + } + + return result; +} + int ACE_POSIX_CB_Proactor::handle_events (ACE_Time_Value &wait_time) { @@ -62,13 +188,29 @@ ACE_POSIX_CB_Proactor::handle_events (void) } int -ACE_POSIX_CB_Proactor::notify_completion (int sig_num) +ACE_POSIX_CB_Proactor::notify_completion (int /* sig_num */) { - ACE_UNUSED_ARG (sig_num); - return this->sema_.release(); } +int +ACE_POSIX_CB_Proactor::post_completion (ACE_POSIX_Asynch_Result *result) +{ + ACE_MT (ACE_GUARD_RETURN (ACE_SYNCH_MUTEX, ace_mon, this->mutex_, -1)); + + int const rc = this->putq_result (result); + if (rc == 0) + this->sema_.release (); + return rc; +} + +void +ACE_POSIX_CB_Proactor::abandon_pending_aio (void) +{ + if (this->notification_state_ != 0) + this->notification_state_->abandon_pending (); +} + ssize_t ACE_POSIX_CB_Proactor::allocate_aio_slot (ACE_POSIX_Asynch_Result *result) @@ -90,7 +232,7 @@ ACE_POSIX_CB_Proactor::allocate_aio_slot (ACE_POSIX_Asynch_Result *result) # endif /* ACE_HAS_SIG_C_FUNC */ result->aio_sigevent.sigev_notify_attributes = 0; - result->aio_sigevent.sigev_value.sival_ptr = this ; + result->aio_sigevent.sigev_value.sival_ptr = this->notification_state_; return slot; } @@ -160,15 +302,77 @@ ACE_POSIX_CB_Proactor::handle_events_i (u_long milli_seconds) // process post_completed results ret_que = this->process_result_queue (); - // Uncomment this if you want to test - // and research the behavior of you system - // ACELIB_DEBUG ((LM_DEBUG, - // "(%t) NumAIO=%d NumQueue=%d\n", - // ret_aio, ret_que)); return ret_aio + ret_que > 0 ? 
1 : 0; } +int +ACE_POSIX_CB_Proactor::start_aio (ACE_POSIX_Asynch_Result *result, + ACE_POSIX_Proactor::Opcode op) +{ + ACE_MT (ACE_GUARD_RETURN (ACE_Thread_Mutex, ace_mon, this->mutex_, -1)); + + int ret_val = (aiocb_list_cur_size_ >= aiocb_list_max_size_) ? -1 : 0; + + if (result == 0) + return ret_val; + + switch (op) + { + case ACE_POSIX_Proactor::ACE_OPCODE_READ: + result->aio_lio_opcode = LIO_READ; + break; + + case ACE_POSIX_Proactor::ACE_OPCODE_WRITE: + result->aio_lio_opcode = LIO_WRITE; + break; + + default: + ACELIB_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("%N:%l:(%P|%t)::") + ACE_TEXT ("start_aio: Invalid op code %d\n"), + op), + -1); + } + + if (ret_val != 0) + { + errno = EAGAIN; + return -1; + } + + ssize_t const slot = this->allocate_aio_slot (result); + if (slot < 0) + return -1; + + size_t const index = static_cast (slot); + this->result_list_[index] = result; + ++this->aiocb_list_cur_size_; + + if (this->notification_state_ != 0) + this->notification_state_->add_pending (); + + ret_val = this->start_aio_i (result); + switch (ret_val) + { + case 0: + this->aiocb_list_[index] = result; + return 0; + + case 1: + ++this->num_deferred_aiocb_; + return 0; + + default: + this->abandon_pending_aio (); + break; + } + + this->result_list_[index] = 0; + --this->aiocb_list_cur_size_; + return -1; +} + ACE_END_VERSIONED_NAMESPACE_DECL #endif /* ACE_HAS_AIO_CALLS && !ACE_HAS_BROKEN_SIGEVENT_STRUCT */ diff --git a/ACE/ace/POSIX_CB_Proactor.h b/ACE/ace/POSIX_CB_Proactor.h index e38360f5f9fbd..4bc762ef91b49 100644 --- a/ACE/ace/POSIX_CB_Proactor.h +++ b/ACE/ace/POSIX_CB_Proactor.h @@ -19,8 +19,10 @@ #if defined (ACE_HAS_AIO_CALLS) +#include "ace/Condition_Thread_Mutex.h" #include "ace/Synch_Traits.h" #include "ace/Thread_Semaphore.h" +#include "ace/Thread_Mutex.h" #include "ace/Null_Semaphore.h" #include "ace/POSIX_Proactor.h" @@ -31,11 +33,51 @@ ACE_BEGIN_VERSIONED_NAMESPACE_DECL /** * @class ACE_POSIX_CB_Proactor * - * @brief Implementation of Callback-based 
Proactor - * }; + * @brief Implementation of callback-based Proactor. */ class ACE_Export ACE_POSIX_CB_Proactor : public ACE_POSIX_AIOCB_Proactor { + /// Tracks pending callback notifications during shutdown. + class Notification_State + { + public: + /// Construct the state object using the callback startup semaphore. + explicit Notification_State (ACE_SYNCH_SEMAPHORE &sema); + + /// Record a newly submitted callback that must finish before close(). + void add_pending (void); + + /// Mark one pending callback as completed. + void complete_one (void); + + /// Remove one pending callback that was abandoned before completion. + void abandon_pending (void); + + /// Return the number of callbacks still expected to complete. + size_t pending (void); + + /// Wait until the pending callback count reaches zero. + int wait_for_pending_zero (const ACE_Time_Value *abstime); + + /// Hold an internal reference while a callback may still access the state. + void add_ref (void); + + /// Release a callback lifetime reference. + void remove_ref (void); + + /// Detach the startup semaphore once callback submission is stable. + void detach (void); + + private: + /// Finalize one pending callback update and optionally wake the waiter. + void finish_pending_i (bool signal_waiter); + + ACE_Thread_Mutex mutex_; + ACE_Condition_Thread_Mutex zero_pending_; + ACE_SYNCH_SEMAPHORE *sema_; + size_t pending_callbacks_; + size_t ref_count_; + }; public: virtual Proactor_Type get_impl_type (void); @@ -47,8 +89,10 @@ class ACE_Export ACE_POSIX_CB_Proactor : public ACE_POSIX_AIOCB_Proactor /// be started at the same time. ACE_POSIX_CB_Proactor (size_t max_aio_operations = ACE_AIO_DEFAULT_SIZE); - // This only public so the "extern C" completion function can see it - // when needed. + /// Close down the Proactor. + virtual int close (void); + + /// Static completion trampoline used by the POSIX sigevent callback API. 
static void aio_completion_func (sigval cb_data); protected: @@ -73,10 +117,20 @@ class ACE_Export ACE_POSIX_CB_Proactor : public ACE_POSIX_AIOCB_Proactor /// Find free slot to store result and aiocb pointer virtual ssize_t allocate_aio_slot (ACE_POSIX_Asynch_Result *result); + /// Initiate an aio operation and track callback lifetime. + virtual int start_aio (ACE_POSIX_Asynch_Result *result, + ACE_POSIX_Proactor::Opcode op); + /// Notify queue of "post_completed" ACE_POSIX_Asynch_Results /// called from post_completion method virtual int notify_completion (int sig_num); + /// Post a result without instantiating the AIOCB notify pipe. + virtual int post_completion (ACE_POSIX_Asynch_Result *result); + + /// Drop the outstanding callback count for abandoned operations. + virtual void abandon_pending_aio (void); + /** * Dispatch a single set of events. If @a milli_seconds elapses * before any events occur, return 0. Return 1 if a completion is @@ -87,6 +141,9 @@ class ACE_Export ACE_POSIX_CB_Proactor : public ACE_POSIX_AIOCB_Proactor /// Semaphore variable to notify /// used to wait the first AIO start ACE_SYNCH_SEMAPHORE sema_; + + /// Shared callback lifetime state for in-flight completions. 
+ Notification_State *notification_state_; }; ACE_END_VERSIONED_NAMESPACE_DECL diff --git a/ACE/ace/POSIX_Proactor.cpp b/ACE/ace/POSIX_Proactor.cpp index e3414396b3abb..3a8f3d4810ef3 100644 --- a/ACE/ace/POSIX_Proactor.cpp +++ b/ACE/ace/POSIX_Proactor.cpp @@ -57,8 +57,11 @@ class ACE_POSIX_Wakeup_Completion : public ACE_POSIX_Asynch_Result // ********************************************************************* ACE_POSIX_Proactor::ACE_POSIX_Proactor (void) - : os_id_ (ACE_OS_UNDEFINED) + : os_id_ (ACE_OS_UNDEFINED), + pseudo_task_ (0) { + ACE_NEW (this->pseudo_task_, ACE_Asynch_Pseudo_Task); + #if defined(sun) os_id_ = ACE_OS_SUN; // set family @@ -99,6 +102,9 @@ ACE_POSIX_Proactor::~ACE_POSIX_Proactor (void) int ACE_POSIX_Proactor::close (void) { + delete this->pseudo_task_; + this->pseudo_task_ = 0; + return 0; } @@ -539,12 +545,13 @@ int ACE_POSIX_Proactor::post_wakeup_completions (int how_many) { ACE_POSIX_Wakeup_Completion *wakeup_completion = 0; + ACE_Handler::Proxy_Ptr null_handler_proxy; for (int ci = 0; ci < how_many; ci++) { ACE_NEW_RETURN (wakeup_completion, - ACE_POSIX_Wakeup_Completion (this->wakeup_handler_.proxy ()), + ACE_POSIX_Wakeup_Completion (null_handler_proxy), -1); if (this->post_completion (wakeup_completion) == -1) return -1; @@ -757,6 +764,15 @@ ACE_POSIX_AIOCB_Proactor::ACE_POSIX_AIOCB_Proactor (size_t max_aio_operations) // Check for correct value for max_aio_operations check_max_aio_num (); +#if defined (__GLIBC__) + aioinit init; + ACE_OS::memset (&init, 0, sizeof (init)); + init.aio_threads = static_cast (this->aiocb_list_max_size_ > 16 ? 
16 + : this->aiocb_list_max_size_); + init.aio_num = static_cast (this->aiocb_list_max_size_); + aio_init (&init); +#endif /* __GLIBC__ */ + this->create_result_aiocb_list (); this->create_notify_manager (); @@ -782,6 +798,15 @@ ACE_POSIX_AIOCB_Proactor::ACE_POSIX_AIOCB_Proactor (size_t max_aio_operations, //check for correct value for max_aio_operations this->check_max_aio_num (); +#if defined (__GLIBC__) + aioinit init; + ACE_OS::memset (&init, 0, sizeof (init)); + init.aio_threads = static_cast (this->aiocb_list_max_size_ > 16 ? 16 + : this->aiocb_list_max_size_); + init.aio_num = static_cast (this->aiocb_list_max_size_); + aio_init (&init); +#endif /* __GLIBC__ */ + this->create_result_aiocb_list (); // @@ We should create Notify_Pipe_Manager in the derived class to @@ -816,7 +841,7 @@ ACE_POSIX_AIOCB_Proactor::close (void) void ACE_POSIX_AIOCB_Proactor::set_notify_handle (ACE_HANDLE h) { - notify_pipe_read_handle_ = h; + this->notify_pipe_read_handle_ = h; } int ACE_POSIX_AIOCB_Proactor::create_result_aiocb_list (void) @@ -831,7 +856,7 @@ int ACE_POSIX_AIOCB_Proactor::create_result_aiocb_list (void) -1); // Initialize the array. - for (size_t ai = 0; ai < this->aiocb_list_max_size_; ai++) + for (size_t ai = 0; ai < this->aiocb_list_max_size_; ++ai) { aiocb_list_[ai] = 0; result_list_[ai] = 0; @@ -847,30 +872,72 @@ int ACE_POSIX_AIOCB_Proactor::delete_result_aiocb_list (void) size_t ai; + for (ai = 0; ai < aiocb_list_max_size_; ++ai) + { + if (this->result_list_[ai] == 0 || this->aiocb_list_[ai] != 0) + continue; + + this->abandon_pending_aio (); + delete this->result_list_[ai]; + this->result_list_[ai] = 0; + } + // Try to cancel all uncompleted operations; POSIX systems may have // hidden system threads that still can work with our aiocbs! 
- for (ai = 0; ai < aiocb_list_max_size_; ai++) + for (ai = 0; ai < aiocb_list_max_size_; ++ai) if (this->aiocb_list_[ai] != 0) // active operation this->cancel_aiocb (result_list_[ai]); + const ACE_Time_Value settle_interval (0, 10000); + const size_t max_settle_attempts = 50; int num_pending = 0; - for (ai = 0; ai < aiocb_list_max_size_; ai++) + for (size_t attempt = 0; attempt < max_settle_attempts; ++attempt) + { + num_pending = 0; + + for (ai = 0; ai < aiocb_list_max_size_; ++ai) + { + if (this->aiocb_list_[ai] == 0) // not active operation + continue; + + int error_status = 0; + size_t transfer_count = 0; + int flg_completed = this->get_result_status (result_list_[ai], + error_status, + transfer_count); + + if (flg_completed == 0) // not completed + { + ++num_pending; + continue; + } + + delete this->result_list_[ai]; + this->result_list_[ai] = 0; + this->aiocb_list_[ai] = 0; + } + + if (num_pending == 0) + break; + + if (attempt + 1 < max_settle_attempts) + ACE_OS::sleep (settle_interval); + } + + for (ai = 0; ai < aiocb_list_max_size_; ++ai) { - if (this->aiocb_list_[ai] == 0 ) // not active operation + if (this->aiocb_list_[ai] == 0) continue; - // Get the error and return status of the aio_ operation. int error_status = 0; size_t transfer_count = 0; int flg_completed = this->get_result_status (result_list_[ai], error_status, transfer_count); - //don't delete uncompleted AIOCB's if (flg_completed == 0) // not completed !!! { - num_pending++; #if 0 char * errtxt = ACE_OS::strerror (error_status); if (errtxt == 0) @@ -897,16 +964,6 @@ int ACE_POSIX_AIOCB_Proactor::delete_result_aiocb_list (void) } } - // If it is not possible cancel some operation (num_pending > 0 ), - // we can do only one thing -report about this - // and complain about POSIX implementation. - // We know that we have memory leaks, but it is better than - // segmentation fault! 
- ACELIB_DEBUG - ((LM_DEBUG, - ACE_TEXT("ACE_POSIX_AIOCB_Proactor::delete_result_aiocb_list\n") - ACE_TEXT(" number pending AIO=%d\n"), - num_pending)); delete [] this->aiocb_list_; this->aiocb_list_ = 0; @@ -914,8 +971,7 @@ int ACE_POSIX_AIOCB_Proactor::delete_result_aiocb_list (void) delete [] this->result_list_; this->result_list_ = 0; - return (num_pending == 0 ? 0 : -1); - // ?? or just always return 0; + return 0; } void ACE_POSIX_AIOCB_Proactor::check_max_aio_num () @@ -971,9 +1027,6 @@ void ACE_POSIX_AIOCB_Proactor::check_max_aio_num () && aiocb_list_max_size_ > (unsigned long) max_num_files) aiocb_list_max_size_ = (unsigned long) max_num_files; - ACELIB_DEBUG ((LM_DEBUG, - "(%P | %t) ACE_POSIX_AIOCB_Proactor::Max Number of AIOs=%d\n", - aiocb_list_max_size_)); } void @@ -990,12 +1043,25 @@ ACE_POSIX_AIOCB_Proactor::create_notify_manager (void) void ACE_POSIX_AIOCB_Proactor::delete_notify_manager (void) { + ACE_MT (ACE_GUARD (ACE_SYNCH_MUTEX, ace_mon, this->notify_manager_mutex_)); + // We are responsible for delete as all pointers set to 0 after // delete, it is save to delete twice delete aiocb_notify_pipe_manager_; aiocb_notify_pipe_manager_ = 0; } +int +ACE_POSIX_AIOCB_Proactor::ensure_notify_manager (void) +{ + ACE_MT (ACE_GUARD_RETURN (ACE_SYNCH_MUTEX, ace_mon, this->notify_manager_mutex_, -1)); + + if (this->aiocb_notify_pipe_manager_ == 0) + this->create_notify_manager (); + + return this->aiocb_notify_pipe_manager_ != 0 ? 
0 : -1; +} + int ACE_POSIX_AIOCB_Proactor::handle_events (ACE_Time_Value &wait_time) { @@ -1015,12 +1081,23 @@ ACE_POSIX_AIOCB_Proactor::notify_completion(int sig_num) { ACE_UNUSED_ARG (sig_num); + if (this->aiocb_notify_pipe_manager_ == 0) + return 0; + return this->aiocb_notify_pipe_manager_->notify (); } +void +ACE_POSIX_AIOCB_Proactor::abandon_pending_aio (void) +{ +} + int ACE_POSIX_AIOCB_Proactor::post_completion (ACE_POSIX_Asynch_Result *result) { + if (this->ensure_notify_manager () != 0) + return -1; + ACE_MT (ACE_GUARD_RETURN (ACE_SYNCH_MUTEX, ace_mon, this->mutex_, -1)); int ret_val = this->putq_result (result); @@ -1108,28 +1185,72 @@ ACE_POSIX_AIOCB_Proactor::handle_events_i (u_long milli_seconds) { int result_suspend = 0; int retval= 0; + aiocb **wait_list = 0; + size_t wait_list_size = 0; + bool retry_deferred = false; - if (milli_seconds == ACE_INFINITE) - // Indefinite blocking. - result_suspend = aio_suspend (aiocb_list_, - aiocb_list_max_size_, - 0); - else + ACE_MT (ACE_GUARD_RETURN (ACE_Thread_Mutex, dispatch_guard, this->dispatch_mutex_, -1)); + + { + ACE_MT (ACE_GUARD_RETURN (ACE_Thread_Mutex, ace_mon, this->mutex_, -1)); + + if (this->num_deferred_aiocb_ != 0) + { + // AIO queue overflow can defer socket operations until resources + // free up. If we then block forever on the notify pipe AIO alone, + // those deferred operations never get another chance to start. 
+ this->start_deferred_aio (); + retry_deferred = (this->num_deferred_aiocb_ != 0); + } + + wait_list_size = this->num_started_aio_; + + if (wait_list_size != 0) + { + ACE_NEW_RETURN (wait_list, aiocb *[wait_list_size], -1); + + size_t wait_index = 0; + for (size_t list_index = 0; + list_index < this->aiocb_list_max_size_ && wait_index < wait_list_size; + ++list_index) + { + if (this->aiocb_list_[list_index] != 0) + wait_list[wait_index++] = this->aiocb_list_[list_index]; + } + + wait_list_size = wait_index; + } + } + + if (wait_list_size != 0) { - // Block on for - timespec timeout; - timeout.tv_sec = milli_seconds / 1000; - timeout.tv_nsec = (milli_seconds - (timeout.tv_sec * 1000)) * 1000000; - result_suspend = aio_suspend (aiocb_list_, - aiocb_list_max_size_, - &timeout); + if (milli_seconds == ACE_INFINITE && !retry_deferred) + // Indefinite blocking. + result_suspend = aio_suspend (wait_list, + wait_list_size, + 0); + else + { + // Block on for + u_long suspend_millis = + (milli_seconds == ACE_INFINITE) ? 
10 : milli_seconds; + timespec timeout; + timeout.tv_sec = suspend_millis / 1000; + timeout.tv_nsec = + (suspend_millis - (timeout.tv_sec * 1000)) * 1000000; + result_suspend = aio_suspend (wait_list, + wait_list_size, + &timeout); + } } + delete [] wait_list; + // Check for errors if (result_suspend == -1) { if (errno != EAGAIN && // Timeout - errno != EINTR ) // Interrupted call + errno != EINTR) // Interrupted call ACELIB_ERROR ((LM_ERROR, ACE_TEXT ("%N:%l:(%P|%t)::%p\n"), ACE_TEXT ("handle_events: aio_suspend failed"))); @@ -1199,6 +1320,10 @@ ACE_POSIX_AIOCB_Proactor::find_completed_aio (int &error_status, // parameter index defines initial slot to scan // parameter count tells us how many slots should we scan + if (this->get_impl_type () != ACE_POSIX_Proactor::PROACTOR_CB + && this->ensure_notify_manager () != 0) + return 0; + ACE_MT (ACE_GUARD_RETURN (ACE_Thread_Mutex, ace_mon, this->mutex_, 0)); ACE_POSIX_Asynch_Result *asynch_result = 0; @@ -1296,6 +1421,10 @@ ACE_POSIX_AIOCB_Proactor::start_aio (ACE_POSIX_Asynch_Result *result, { case 0: // started OK aiocb_list_[index] = result; + // Wake any thread blocked in aio_suspend() on an older snapshot of + // the active AIO set so it rebuilds the wait list to include this + // newly started operation. + this->notify_completion (result->signal_number ()); return 0; case 1: // OS AIO queue overflow @@ -1315,11 +1444,12 @@ ssize_t ACE_POSIX_AIOCB_Proactor::allocate_aio_slot (ACE_POSIX_Asynch_Result *result) { size_t i = 0; + const ACE_HANDLE notify_pipe_read_handle = this->notify_pipe_read_handle_.value (); // we reserve zero slot for ACE_AIOCB_Notify_Pipe_Manager // so make check for ACE_AIOCB_Notify_Pipe_Manager request - if (notify_pipe_read_handle_ == result->aio_fildes) // Notify_Pipe ? + if (notify_pipe_read_handle == result->aio_fildes) // Notify_Pipe ? 
{ // should be free, if (result_list_[i] != 0) // only 1 request { // is allowed @@ -1368,7 +1498,7 @@ ACE_POSIX_AIOCB_Proactor::start_aio_i (ACE_POSIX_Asynch_Result *result) // The following aio_ptr anathema is required to work around a bug in // the optimizer for GCC 4.1.2 aiocb * aio_ptr (result); - switch (result->aio_lio_opcode ) + switch (result->aio_lio_opcode) { case LIO_READ : ptype = ACE_TEXT ("read "); @@ -1419,29 +1549,38 @@ ACE_POSIX_AIOCB_Proactor::start_deferred_aio () if (num_deferred_aiocb_ == 0) return 0; // nothing to do - size_t i = 0; + size_t deferred_index = this->aiocb_list_max_size_; + size_t deferred_count = 0; - for (i= 0; i < this->aiocb_list_max_size_; i++) - if (result_list_[i] !=0 // check for - && aiocb_list_[i] ==0) // deferred AIO - break; + for (size_t i = 0; i < this->aiocb_list_max_size_; ++i) + { + if (this->result_list_[i] == 0 || this->aiocb_list_[i] != 0) + continue; - if (i >= this->aiocb_list_max_size_) - ACELIB_ERROR_RETURN ((LM_ERROR, - "%N:%l:(%P | %t)::\n" - "start_deferred_aio:" - "internal Proactor error 3\n"), - -1); + if (deferred_index == this->aiocb_list_max_size_) + deferred_index = i; - ACE_POSIX_Asynch_Result *result = result_list_[i]; + ++deferred_count; + } + + if (deferred_count == 0) + { + this->num_deferred_aiocb_ = 0; + return 0; + } + + if (this->num_deferred_aiocb_ != deferred_count) + this->num_deferred_aiocb_ = deferred_count; + + ACE_POSIX_Asynch_Result *result = this->result_list_[deferred_index]; int ret_val = start_aio_i (result); switch (ret_val) { case 0 : //started OK , decrement count of deferred AIOs - aiocb_list_[i] = result; - num_deferred_aiocb_ --; + this->aiocb_list_[deferred_index] = result; + --this->num_deferred_aiocb_; return 0; case 1 : @@ -1453,13 +1592,14 @@ ACE_POSIX_AIOCB_Proactor::start_deferred_aio () //AL notify user - result_list_[i] = 0; + this->result_list_[deferred_index] = 0; --aiocb_list_cur_size_; --num_deferred_aiocb_; result->set_error (errno); 
result->set_bytes_transferred (0); + this->abandon_pending_aio (); this->putq_result (result); // we are with locked mutex_ here ! return -1; @@ -1483,6 +1623,11 @@ ACE_POSIX_AIOCB_Proactor::cancel_aio (ACE_HANDLE handle) ACE_TRACE ("ACE_POSIX_AIOCB_Proactor::cancel_aio"); + if (this->get_impl_type () != ACE_POSIX_Proactor::PROACTOR_CB + && this->aiocb_notify_pipe_manager_ == 0 + && this->ensure_notify_manager () != 0) + return -1; + int num_total = 0; int num_cancelled = 0; @@ -1514,6 +1659,7 @@ ACE_POSIX_AIOCB_Proactor::cancel_aio (ACE_HANDLE handle) asynch_result->set_error (ECANCELED); asynch_result->set_bytes_transferred (0); + this->abandon_pending_aio (); this->putq_result (asynch_result); // we are with locked mutex_ here ! } @@ -1926,13 +2072,6 @@ ACE_POSIX_SIG_Proactor::handle_events_i (const ACE_Time_Value *timeout) // process post_completed results ret_que = this->process_result_queue (); - // Uncomment this if you want to test - // and research the behavior of you system -#if 0 - ACELIB_DEBUG ((LM_DEBUG, - "(%t) NumAIO=%d NumQueue=%d\n", - ret_aio, ret_que)); -#endif return ret_aio + ret_que > 0 ? 1 : 0; } @@ -1995,10 +2134,10 @@ ACE_POSIX_Wakeup_Completion::complete (size_t /* bytes_transferred */, const void * /* completion_key */, u_long /* error */) { - - ACE_Handler *handler = this->handler_proxy_.get ()->handler (); - if (handler != 0) - handler->handle_wakeup (); + // These completions are posted only to wake a blocked POSIX event-loop + // thread. post_wakeup_completions() constructs them with a null handler + // proxy, so dispatch is intentionally a no-op once the wakeup has been + // observed. } ACE_END_VERSIONED_NAMESPACE_DECL diff --git a/ACE/ace/POSIX_Proactor.h b/ACE/ace/POSIX_Proactor.h index 46ebef1fab014..05d60515b5ff4 100644 --- a/ACE/ace/POSIX_Proactor.h +++ b/ACE/ace/POSIX_Proactor.h @@ -26,6 +26,7 @@ // system calls. 
#include "ace/Proactor_Impl.h" +#include "ace/Atomic_Op.h" #include "ace/Free_List.h" #include "ace/Pipe.h" #include "ace/POSIX_Asynch_IO.h" @@ -301,14 +302,11 @@ class ACE_Export ACE_POSIX_Proactor : public ACE_Proactor_Impl virtual int post_wakeup_completions (int how_many); protected: - /// Handler to handle the wakeups. This works in conjunction with the - /// . - ACE_Handler wakeup_handler_; int os_id_; private: /// Task to process pseudo-asynchronous accept/connect - ACE_Asynch_Pseudo_Task pseudo_task_; + ACE_Asynch_Pseudo_Task *pseudo_task_; }; @@ -409,6 +407,9 @@ class ACE_Export ACE_POSIX_AIOCB_Proactor : public ACE_POSIX_Proactor void create_notify_manager (void); void delete_notify_manager (void); + /// Lazily create the notify manager and report whether it is ready. + int ensure_notify_manager (void); + /// Define the maximum number of asynchronous I/O requests /// for the current OS void check_max_aio_num (void) ; @@ -445,6 +446,10 @@ class ACE_Export ACE_POSIX_AIOCB_Proactor : public ACE_POSIX_Proactor /// called from post_completion method virtual int notify_completion (int sig_num); + /// Called when an AIO request that was expected to signal completion + /// will never invoke its normal completion callback. + virtual void abandon_pending_aio (void); + /// Put "post_completed" result into the internal queue int putq_result (ACE_POSIX_Asynch_Result *result); @@ -476,10 +481,16 @@ class ACE_Export ACE_POSIX_AIOCB_Proactor : public ACE_POSIX_Proactor /// Mutex to protect work with lists. ACE_SYNCH_MUTEX mutex_; + /// Serialize lazy creation/destruction of the notify pipe manager. + ACE_SYNCH_MUTEX notify_manager_mutex_; + + /// Serialize aio_suspend wait/dequeue handling for the AIOCB backend. + ACE_SYNCH_MUTEX dispatch_mutex_; + /// The purpose of this member is only to identify asynchronous request /// from NotifyManager. We will reserve for it always slot 0 /// in the list of aiocb's to be sure that don't lose notifications. 
- ACE_HANDLE notify_pipe_read_handle_ ; + ACE_Atomic_Op notify_pipe_read_handle_; /// Number of ACE_POSIX_Asynch_Result's waiting for start /// i.e. deferred AIOs diff --git a/ACE/ace/POSIX_Proactor.inl b/ACE/ace/POSIX_Proactor.inl index c6edd4a5ca87a..1c3c53c80e5fa 100644 --- a/ACE/ace/POSIX_Proactor.inl +++ b/ACE/ace/POSIX_Proactor.inl @@ -4,7 +4,7 @@ ACE_BEGIN_VERSIONED_NAMESPACE_DECL ACE_INLINE ACE_Asynch_Pseudo_Task& ACE_POSIX_Proactor::get_asynch_pseudo_task (void) { - return this->pseudo_task_; + return *this->pseudo_task_; } ACE_END_VERSIONED_NAMESPACE_DECL diff --git a/ACE/ace/Proactor.cpp b/ACE/ace/Proactor.cpp index 420379b359ef2..749f01c76f002 100644 --- a/ACE/ace/Proactor.cpp +++ b/ACE/ace/Proactor.cpp @@ -6,6 +6,7 @@ // calls. #include "ace/Auto_Ptr.h" +#include "ace/Atomic_Op.h" #include "ace/Proactor_Impl.h" #include "ace/Object_Manager.h" #include "ace/Task_T.h" @@ -86,13 +87,13 @@ class ACE_Proactor_Timer_Handler : public ACE_Task ACE_Proactor &proactor_; /// Flag used to indicate when we are shutting down. - int shutting_down_; + ACE_Atomic_Op shutting_down_; }; ACE_Proactor_Timer_Handler::ACE_Proactor_Timer_Handler (ACE_Proactor &proactor) : ACE_Task (&proactor.thr_mgr_), proactor_ (proactor), - shutting_down_ (0) + shutting_down_ (false) { } @@ -104,8 +105,7 @@ ACE_Proactor_Timer_Handler::~ACE_Proactor_Timer_Handler (void) int ACE_Proactor_Timer_Handler::destroy (void) { - // Mark for closing down. - this->shutting_down_ = 1; + this->shutting_down_ = true; // Signal timer event. this->timer_event_.signal (); @@ -124,32 +124,22 @@ ACE_Proactor_Timer_Handler::signal (void) int ACE_Proactor_Timer_Handler::svc (void) { - ACE_Time_Value absolute_time; ACE_Time_Value relative_time; int result = 0; - while (this->shutting_down_ == 0) + for (;;) { - // Check whether the timer queue has any items in it. 
- if (this->proactor_.timer_queue ()->is_empty () == 0) + if (this->shutting_down_.value () != 0) + break; + + ACE_Time_Value *wait_time = + this->proactor_.timer_queue ()->calculate_timeout (0, + &relative_time); + + // Block using the timer queue's lock-safe timeout calculation. + if (wait_time != 0) { - // Get the earliest absolute time. - absolute_time = this->proactor_.timer_queue ()->earliest_time (); - - // Get current time from timer queue since we don't know - // which was used. - ACE_Time_Value cur_time = - this->proactor_.timer_queue ()->gettimeofday (); - - // Compare absolute time with curent time received from the - // timer queue. - if (absolute_time > cur_time) - relative_time = absolute_time - cur_time; - else - relative_time = ACE_Time_Value::zero; - - // Block for relative time. - result = this->timer_event_.wait (&relative_time, 0); + result = this->timer_event_.wait (wait_time, 0); } else // The timer queue has no entries, so wait indefinitely. @@ -425,6 +415,11 @@ ACE_Proactor::close_singleton (void) delete ACE_Proactor::proactor_; ACE_Proactor::proactor_ = 0; ACE_Proactor::delete_proactor_ = false; + + ACE_Framework_Repository *repository = + ACE_Framework_Repository::instance (); + if (repository != 0) + repository->remove_component (ACE_Proactor::name ()); } } @@ -458,12 +453,11 @@ ACE_Proactor::proactor_run_event_loop (PROACTOR_EVENT_HOOK eh) { ACE_TRACE ("ACE_Proactor::proactor_run_event_loop"); int result = 0; + int end_event_loop = 0; { ACE_MT (ACE_GUARD_RETURN (ACE_Thread_Mutex, ace_mon, mutex_, -1)); - // Early check. It is ok to do this without lock, since we care just - // whether it is zero or non-zero. if (this->end_event_loop_ != 0) return 0; @@ -474,9 +468,12 @@ ACE_Proactor::proactor_run_event_loop (PROACTOR_EVENT_HOOK eh) // Run the event loop. for (;;) { - // Check the end loop flag. It is ok to do this without lock, - // since we care just whether it is zero or non-zero. 
- if (this->end_event_loop_ != 0) + { + ACE_MT (ACE_GUARD_RETURN (ACE_Thread_Mutex, ace_mon, mutex_, -1)); + end_event_loop = this->end_event_loop_; + } + + if (end_event_loop != 0) break; // is not set. Ready to do . @@ -514,12 +511,11 @@ ACE_Proactor::proactor_run_event_loop (ACE_Time_Value &tv, { ACE_TRACE ("ACE_Proactor::proactor_run_event_loop"); int result = 0; + int end_event_loop = 0; { ACE_MT (ACE_GUARD_RETURN (ACE_Thread_Mutex, ace_mon, mutex_, -1)); - // Early check. It is ok to do this without lock, since we care just - // whether it is zero or non-zero. if (this->end_event_loop_ != 0 || tv == ACE_Time_Value::zero) return 0; @@ -531,9 +527,12 @@ ACE_Proactor::proactor_run_event_loop (ACE_Time_Value &tv, // Run the event loop. for (;;) { - // Check the end loop flag. It is ok to do this without lock, - // since we care just whether it is zero or non-zero. - if (this->end_event_loop_ != 0) + { + ACE_MT (ACE_GUARD_RETURN (ACE_Thread_Mutex, ace_mon, mutex_, -1)); + end_event_loop = this->end_event_loop_; + } + + if (end_event_loop != 0) break; // is not set. Ready to do . @@ -613,6 +612,11 @@ ACE_Proactor::proactor_event_loop_done (void) int ACE_Proactor::close (void) { + // Stop the timer thread before tearing down the implementation it + // posts completions into. + delete this->timer_handler_; + this->timer_handler_ = 0; + // Close the implementation. if (this->implementation ()->close () == -1) ACELIB_ERROR ((LM_ERROR, @@ -626,13 +630,6 @@ ACE_Proactor::close (void) this->implementation_ = 0; } - // Delete the timer handler. - if (this->timer_handler_) - { - delete this->timer_handler_; - this->timer_handler_ = 0; - } - // Delete the timer queue. 
if (this->delete_timer_queue_) { diff --git a/ACE/ace/Thread_Exit.cpp b/ACE/ace/Thread_Exit.cpp index 0d35cdba213ad..4c5b54ce48d1e 100644 --- a/ACE/ace/Thread_Exit.cpp +++ b/ACE/ace/Thread_Exit.cpp @@ -12,6 +12,11 @@ ACE_Thread_Exit::cleanup (void *instance) { ACE_OS_TRACE ("ACE_Thread_Exit::cleanup"); + ACE_MT (ACE_Thread_Mutex *lock = + ACE_Managed_Object::get_preallocated_object + (ACE_Object_Manager::ACE_THREAD_EXIT_LOCK); + ACE_GUARD (ACE_Thread_Mutex, ace_mon, *lock)); + delete (ACE_TSS_TYPE (ACE_Thread_Exit) *) instance; // Set the thr_exit_ static to null to keep things from crashing if @@ -35,29 +40,27 @@ ACE_Thread_Exit::instance (void) // Determines if we were dynamically allocated. static ACE_TSS_TYPE (ACE_Thread_Exit) * volatile instance_; + ACE_TSS_TYPE (ACE_Thread_Exit) *instance = 0; - // Implement the Double Check pattern. + ACE_MT (ACE_Thread_Mutex *lock = + ACE_Managed_Object::get_preallocated_object + (ACE_Object_Manager::ACE_THREAD_EXIT_LOCK); + ACE_GUARD_RETURN (ACE_Thread_Mutex, ace_mon, *lock, 0)); if (!ACE_Thread_Exit::is_constructed_) { - ACE_MT (ACE_Thread_Mutex *lock = - ACE_Managed_Object::get_preallocated_object - (ACE_Object_Manager::ACE_THREAD_EXIT_LOCK); - ACE_GUARD_RETURN (ACE_Thread_Mutex, ace_mon, *lock, 0)); - - if (!ACE_Thread_Exit::is_constructed_) - { - ACE_NEW_RETURN (instance_, - ACE_TSS_TYPE (ACE_Thread_Exit), - 0); + ACE_NEW_RETURN (instance_, + ACE_TSS_TYPE (ACE_Thread_Exit), + 0); - ACE_Thread_Exit::is_constructed_ = true; + ACE_Thread_Exit::is_constructed_ = true; - ACE_Thread_Manager::set_thr_exit (instance_); - } + ACE_Thread_Manager::set_thr_exit (instance_); } - return ACE_TSS_GET (instance_, ACE_Thread_Exit); + instance = instance_; + + return ACE_TSS_GET (instance, ACE_Thread_Exit); #else return 0; #endif /* ACE_HAS_THREAD_SPECIFIC_STORAGE || ACE_HAS_TSS_EMULATION */ diff --git a/ACE/ace/Uring_Asynch_IO.cpp b/ACE/ace/Uring_Asynch_IO.cpp new file mode 100644 index 0000000000000..70bd41f0f38d1 --- /dev/null +++ 
b/ACE/ace/Uring_Asynch_IO.cpp @@ -0,0 +1,1926 @@ +//============================================================================= +/** + * @file Uring_Asynch_IO.cpp + * + * The Linux io_uring asynchronous operation implementations. + */ +//============================================================================= + +#include "Uring_Asynch_IO.h" + +#if defined (ACE_HAS_AIO_CALLS) && defined (ACE_HAS_IO_URING) + +#include "ace/Flag_Manip.h" +#include "ace/Message_Block.h" +#include "ace/Asynch_IO.h" +#include "ace/OS_NS_errno.h" +#include "ace/OS_NS_string.h" +#include "ace/OS_NS_sys_socket.h" +#include "ace/OS_NS_Thread.h" +#include "ace/Addr.h" +#include "ace/Log_Category.h" + +ACE_BEGIN_VERSIONED_NAMESPACE_DECL + +namespace +{ + int + ace_uring_build_iovecs (ACE_Message_Block *message_block, + size_t requested_bytes, + bool write_operation, + struct iovec *&iovec_array, + unsigned int &iovec_count, + size_t &prepared_bytes) + { + iovec_array = 0; + iovec_count = 0; + prepared_bytes = 0; + + if (message_block == 0) + { + errno = EFAULT; + return -1; + } + + for (ACE_Message_Block *mb = message_block; + mb != 0 && prepared_bytes < requested_bytes && iovec_count < ACE_IOV_MAX; + mb = mb->cont ()) + { + size_t const available = write_operation ? mb->length () : mb->space (); + if (available == 0) + continue; + + size_t const remaining = requested_bytes - prepared_bytes; + size_t const len = available > remaining ? remaining : available; + if (len == 0) + break; + + ++iovec_count; + prepared_bytes += len; + } + + if (iovec_count == 0 || prepared_bytes == 0) + { + errno = EINVAL; + return -1; + } + + ACE_NEW_RETURN (iovec_array, struct iovec[iovec_count], -1); + + prepared_bytes = 0; + unsigned int index = 0; + for (ACE_Message_Block *mb = message_block; + mb != 0 && prepared_bytes < requested_bytes && index < iovec_count; + mb = mb->cont ()) + { + size_t const available = write_operation ? 
mb->length () : mb->space (); + if (available == 0) + continue; + + size_t const remaining = requested_bytes - prepared_bytes; + size_t const len = available > remaining ? remaining : available; + iovec_array[index].iov_base = write_operation + ? static_cast (mb->rd_ptr ()) + : static_cast (mb->wr_ptr ()); + iovec_array[index].iov_len = len; + prepared_bytes += len; + ++index; + } + + iovec_count = index; + return 0; + } + + void + ace_uring_advance_read_chain (ACE_Message_Block *message_block, + size_t bytes_transferred) + { + for (ACE_Message_Block *mb = message_block; + mb != 0 && bytes_transferred > 0; + mb = mb->cont ()) + { + size_t len_part = mb->space (); + if (len_part > bytes_transferred) + len_part = bytes_transferred; + + mb->wr_ptr (len_part); + bytes_transferred -= len_part; + } + } + + void + ace_uring_advance_write_chain (ACE_Message_Block *message_block, + size_t bytes_transferred) + { + for (ACE_Message_Block *mb = message_block; + mb != 0 && bytes_transferred > 0; + mb = mb->cont ()) + { + size_t len_part = mb->length (); + if (len_part > bytes_transferred) + len_part = bytes_transferred; + + mb->rd_ptr (len_part); + bytes_transferred -= len_part; + } + } +} + +// --------------------------------------------------------------------------- +// Base Result +// --------------------------------------------------------------------------- + +ACE_Uring_Asynch_Result::ACE_Uring_Asynch_Result + (const ACE_Handler::Proxy_Ptr &handler_proxy, + const void *act, + ACE_HANDLE handle, + u_long offset, + u_long offset_high, + ACE_Proactor *proactor) + : handler_ (0), + handler_proxy_ (handler_proxy), + act_ (act), + handle_ (handle), + offset_ (offset), + offset_high_ (offset_high), + proactor_ (proactor), + bytes_transferred_ (0), + error_ (0), + owner_ (0) +{ + ACE_Handler::Proxy *const proxy = this->handler_proxy_.get (); + this->handler_ = proxy != 0 ? 
proxy->handler () : 0; +} + +ACE_Uring_Asynch_Result::~ACE_Uring_Asynch_Result (void) +{ +} + +ACE_Handler * +ACE_Uring_Asynch_Result::handler (void) const +{ + ACE_Handler::Proxy *const proxy = this->handler_proxy_.get (); + return proxy != 0 ? proxy->handler () : 0; +} + +ACE_Handler * +ACE_Uring_Asynch_Result::dispatch_handler (void) const +{ + return this->handler_; +} + +size_t +ACE_Uring_Asynch_Result::bytes_transferred (void) const +{ + return this->bytes_transferred_; +} + +void +ACE_Uring_Asynch_Result::set_bytes_transferred (size_t n) +{ + this->bytes_transferred_ = n; +} + +u_long +ACE_Uring_Asynch_Result::error (void) const +{ + return this->error_; +} + +void +ACE_Uring_Asynch_Result::set_error (u_long err) +{ + this->error_ = err; +} + +const void * +ACE_Uring_Asynch_Result::act (void) const +{ + return this->act_; +} + +int +ACE_Uring_Asynch_Result::success (void) const +{ + return this->error_ == 0; +} + +const void * +ACE_Uring_Asynch_Result::completion_key (void) const +{ + return 0; +} + +ACE_HANDLE +ACE_Uring_Asynch_Result::event (void) const +{ + return this->handle_; +} + +u_long +ACE_Uring_Asynch_Result::offset (void) const +{ + return this->offset_; +} + +u_long +ACE_Uring_Asynch_Result::offset_high (void) const +{ + return this->offset_high_; +} + +int +ACE_Uring_Asynch_Result::priority (void) const +{ + return 0; +} + +int +ACE_Uring_Asynch_Result::signal_number (void) const +{ + return 0; +} + +void +ACE_Uring_Asynch_Result::owner (ACE_Uring_Asynch_Operation *operation) +{ + this->owner_ = operation; +} + +ACE_Uring_Asynch_Operation * +ACE_Uring_Asynch_Result::owner (void) const +{ + return this->owner_.value (); +} + +int +ACE_Uring_Asynch_Result::post_completion (ACE_Proactor_Impl *proactor_impl) +{ + ACE_Uring_Proactor *const up = dynamic_cast (proactor_impl); + if (up == 0) + return -1; + + if (!up->is_initialized ()) + return -1; + + ACE_GUARD_RETURN (ACE_Thread_Mutex, ace_mon, up->sq_mutex (), -1); + struct io_uring_sqe *const sqe = 
up->get_sqe (); + if (!sqe) + return -1; + ::io_uring_prep_nop (sqe); + ::io_uring_sqe_set_data (sqe, this); + return up->submit_sqe () < 0 ? -1 : 0; +} + +// --------------------------------------------------------------------------- +// Timer +// --------------------------------------------------------------------------- + +ACE_Uring_Asynch_Timer::ACE_Uring_Asynch_Timer + (const ACE_Handler::Proxy_Ptr &handler_proxy, + const void *act, + const ACE_Time_Value &tv, + ACE_Proactor *proactor) + : ACE_Uring_Asynch_Result (handler_proxy, act, ACE_INVALID_HANDLE, 0, 0, proactor), + time_ (tv) +{ +} + +void +ACE_Uring_Asynch_Timer::complete (size_t, int, const void *, u_long) +{ + ACE_Handler *const handler = this->handler (); + if (handler != 0) + handler->handle_time_out (this->time_, this->act_); + delete this; +} + +// --------------------------------------------------------------------------- +// Base Operation +// --------------------------------------------------------------------------- + +ACE_Uring_Asynch_Operation::ACE_Uring_Asynch_Operation (ACE_Uring_Proactor *proactor) + : uring_proactor_ (proactor), + proactor_ (0), + handle_ (ACE_INVALID_HANDLE) +{ +} + +ACE_Uring_Asynch_Operation::~ACE_Uring_Asynch_Operation (void) +{ + if (this->uring_proactor_ != 0 && this->uring_proactor_->is_initialized ()) + (void) this->cancel (); +} + +int +ACE_Uring_Asynch_Operation::open (const ACE_Handler::Proxy_Ptr &handler_proxy, + ACE_HANDLE handle, + const void * /*completion_key*/, + ACE_Proactor *proactor) +{ + this->handler_proxy_ = handler_proxy; + this->handle_ = handle; + this->proactor_ = proactor; + + // Grab the handle from the handler if none was provided + // (matches POSIX behavior). 
+ if (this->handle_ == ACE_INVALID_HANDLE) + { + ACE_Handler *const h = handler_proxy.get ()->handler (); + if (h != 0) + this->handle_ = h->handle (); + } + if (this->handle_ == ACE_INVALID_HANDLE) + return -1; + + return 0; +} + +int +ACE_Uring_Asynch_Operation::cancel (void) +{ + if (this->uring_proactor_ == 0) + return -1; + + ACE_GUARD_RETURN (ACE_Thread_Mutex, sq_mon, this->uring_proactor_->sq_mutex (), -1); + ACE_GUARD_RETURN (ACE_Thread_Mutex, pending_mon, this->pending_results_lock_, -1); + + if (this->pending_results_.is_empty ()) + return 1; + + ACE_Unbounded_Set::iterator const the_end = + this->pending_results_.end (); + for (ACE_Unbounded_Set::iterator i = + this->pending_results_.begin (); + i != the_end; + ++i) + { + struct io_uring_sqe *const sqe = this->uring_proactor_->get_sqe (); + if (!sqe) + { + errno = EAGAIN; + return -1; + } + + ::io_uring_prep_cancel (sqe, *i, 0); + ::io_uring_sqe_set_data (sqe, 0); + } + + int const submit_result = this->uring_proactor_->submit_pending_sqe (); + if (submit_result < 0) + { + errno = -submit_result; + return -1; + } + + return 0; +} + +ACE_Proactor * +ACE_Uring_Asynch_Operation::proactor (void) const +{ + return this->proactor_; +} + +ACE_Handler * +ACE_Uring_Asynch_Operation::handler (void) +{ + ACE_GUARD_RETURN (ACE_Thread_Mutex, ace_mon, this->pending_results_lock_, 0); + ACE_Handler::Proxy *const proxy = this->handler_proxy_.get (); + return proxy != 0 ? 
proxy->handler () : 0; +} + +int +ACE_Uring_Asynch_Operation::queue_result (ACE_Uring_Asynch_Result *result) +{ + this->register_result (result); + + if (this->uring_proactor_->signal_submitter () == -1) + { + this->unregister_result (result); + delete result; + return -1; + } + + return 0; +} + +int +ACE_Uring_Asynch_Operation::submit_result (ACE_Uring_Asynch_Result *result) +{ + this->register_result (result); + + if (this->uring_proactor_->signal_submitter () == -1) + { + this->unregister_result (result); + delete result; + return -1; + } + + return 0; +} + +void +ACE_Uring_Asynch_Operation::register_result (ACE_Uring_Asynch_Result *result) +{ + if (result == 0) + return; + + ACE_GUARD (ACE_Thread_Mutex, ace_mon, this->pending_results_lock_); + result->owner (this); + this->pending_results_.insert (result); +} + +void +ACE_Uring_Asynch_Operation::unregister_result (ACE_Uring_Asynch_Result *result) +{ + if (result == 0) + return; + + ACE_GUARD (ACE_Thread_Mutex, ace_mon, this->pending_results_lock_); + this->pending_results_.remove (result); + result->owner (0); +} + +// --------------------------------------------------------------------------- +// Read Stream and File Impl +// --------------------------------------------------------------------------- + +ACE_Uring_Asynch_Read_Stream_Result::ACE_Uring_Asynch_Read_Stream_Result + (const ACE_Handler::Proxy_Ptr &handler_proxy, + ACE_HANDLE handle, + ACE_Message_Block *message_block, + size_t bytes_to_read, + const void *act, + ACE_Proactor *proactor, + u_long offset, + u_long offset_high, + bool vectored, + struct iovec *iovec) + : ACE_Uring_Asynch_Result (handler_proxy, + act, + handle, + offset, + offset_high, + proactor), + message_block_ (message_block), + bytes_to_read_ (bytes_to_read), + vectored_ (vectored), + iovec_ (iovec) +{ +} + +ACE_Uring_Asynch_Read_Stream_Result::~ACE_Uring_Asynch_Read_Stream_Result (void) +{ + delete [] this->iovec_; +} + +size_t +ACE_Uring_Asynch_Read_Stream_Result::bytes_to_read 
(void) const +{ + return this->bytes_to_read_; +} + +ACE_Message_Block & +ACE_Uring_Asynch_Read_Stream_Result::message_block (void) const +{ + return *this->message_block_; +} + +ACE_HANDLE +ACE_Uring_Asynch_Read_Stream_Result::handle (void) const +{ + return this->handle_; +} + +void +ACE_Uring_Asynch_Read_Stream_Result::complete (size_t bytes_transferred, + int success, + const void *, + u_long error) +{ + this->bytes_transferred_ = bytes_transferred; + this->error_ = error; + if (success && this->message_block_ != 0) + { + if (this->vectored_) + ace_uring_advance_read_chain (this->message_block_, bytes_transferred); + else + this->message_block_->wr_ptr (bytes_transferred); + } + + ACE_Handler *const handler = this->handler (); + if (handler != 0) + { + ACE_Asynch_Read_Stream::Result result (this); + handler->handle_read_stream (result); + } + delete this; +} + +ACE_Uring_Asynch_Read_Stream::ACE_Uring_Asynch_Read_Stream + (ACE_Uring_Proactor *proactor) + : ACE_Uring_Asynch_Operation (proactor) +{ +} + +int +ACE_Uring_Asynch_Read_Stream::read (ACE_Message_Block &message_block, + size_t num_bytes_to_read, + const void *act, + int, + int) +{ + size_t const space = message_block.space (); + if (num_bytes_to_read > space) num_bytes_to_read = space; + + ACE_Uring_Asynch_Read_Stream_Result *result = 0; + ACE_NEW_RETURN (result, + ACE_Uring_Asynch_Read_Stream_Result (this->handler_proxy_, + this->handle_, + &message_block, + num_bytes_to_read, + act, + this->proactor_), + -1); + + ACE_GUARD_RETURN (ACE_Thread_Mutex, ace_mon, this->uring_proactor_->sq_mutex (), -1); + struct io_uring_sqe *const sqe = this->uring_proactor_->get_sqe (); + if (!sqe) + { + delete result; + errno = EAGAIN; + return -1; + } + + ::io_uring_prep_read (sqe, + this->handle_, + message_block.wr_ptr (), + (unsigned int) num_bytes_to_read, + 0); + ::io_uring_sqe_set_data (sqe, result); + return this->queue_result (result); +} + +int +ACE_Uring_Asynch_Read_Stream::readv (ACE_Message_Block 
&message_block, + size_t num_bytes_to_read, + const void *act, + int, + int) +{ + struct iovec *iovec = 0; + unsigned int iovec_count = 0; + if (ace_uring_build_iovecs (&message_block, + num_bytes_to_read, + false, + iovec, + iovec_count, + num_bytes_to_read) == -1) + return -1; + + ACE_Uring_Asynch_Read_Stream_Result *result = 0; + ACE_NEW_NORETURN (result, + ACE_Uring_Asynch_Read_Stream_Result (this->handler_proxy_, + this->handle_, + &message_block, + num_bytes_to_read, + act, + this->proactor_, + 0, + 0, + true, + iovec)); + if (result == 0) + { + delete [] iovec; + errno = ENOMEM; + return -1; + } + + ACE_GUARD_RETURN (ACE_Thread_Mutex, ace_mon, this->uring_proactor_->sq_mutex (), -1); + struct io_uring_sqe *const sqe = this->uring_proactor_->get_sqe (); + if (!sqe) + { + delete result; + errno = EAGAIN; + return -1; + } + + ::io_uring_prep_readv (sqe, + this->handle_, + iovec, + iovec_count, + 0); + ::io_uring_sqe_set_data (sqe, result); + return this->queue_result (result); +} + +ACE_Uring_Asynch_Read_File::ACE_Uring_Asynch_Read_File + (ACE_Uring_Proactor *proactor) + : ACE_Uring_Asynch_Read_Stream (proactor) +{ +} + +int +ACE_Uring_Asynch_Read_File::read (ACE_Message_Block &message_block, + size_t num_bytes_to_read, + u_long offset, + u_long offset_high, + const void *act, + int, + int) +{ + size_t const space = message_block.space (); + if (num_bytes_to_read > space) num_bytes_to_read = space; + + ACE_Uring_Asynch_Read_File_Result *result = 0; + ACE_NEW_RETURN (result, + ACE_Uring_Asynch_Read_File_Result (this->handler_proxy_, + this->handle_, + &message_block, + num_bytes_to_read, + act, + this->proactor_, + offset, + offset_high), + -1); + + ACE_UINT64 const full_offset = + (static_cast (offset_high) << 32) | offset; + + ACE_GUARD_RETURN (ACE_Thread_Mutex, ace_mon, this->uring_proactor_->sq_mutex (), -1); + struct io_uring_sqe *const sqe = this->uring_proactor_->get_sqe (); + if (!sqe) + { + delete result; + errno = EAGAIN; + return -1; + } + + 
::io_uring_prep_read (sqe, + this->handle_, + message_block.wr_ptr (), + (unsigned int) num_bytes_to_read, + full_offset); + ::io_uring_sqe_set_data (sqe, result); + return this->queue_result (result); +} + +int +ACE_Uring_Asynch_Read_File::read (ACE_Message_Block &message_block, + size_t num_bytes_to_read, + const void *act, + int, + int) +{ + size_t const space = message_block.space (); + if (num_bytes_to_read > space) num_bytes_to_read = space; + + ACE_Uring_Asynch_Read_File_Result *result = 0; + ACE_NEW_RETURN (result, + ACE_Uring_Asynch_Read_File_Result (this->handler_proxy_, + this->handle_, + &message_block, + num_bytes_to_read, + act, + this->proactor_, + 0, + 0), + -1); + + ACE_GUARD_RETURN (ACE_Thread_Mutex, ace_mon, this->uring_proactor_->sq_mutex (), -1); + struct io_uring_sqe *const sqe = this->uring_proactor_->get_sqe (); + if (!sqe) + { + delete result; + errno = EAGAIN; + return -1; + } + + ::io_uring_prep_read (sqe, + this->handle_, + message_block.wr_ptr (), + (unsigned int) num_bytes_to_read, + 0); + ::io_uring_sqe_set_data (sqe, result); + return this->queue_result (result); +} + +int +ACE_Uring_Asynch_Read_File::readv (ACE_Message_Block &message_block, + size_t num_bytes_to_read, + u_long offset, + u_long offset_high, + const void *act, + int, + int) +{ + struct iovec *iovec = 0; + unsigned int iovec_count = 0; + if (ace_uring_build_iovecs (&message_block, + num_bytes_to_read, + false, + iovec, + iovec_count, + num_bytes_to_read) == -1) + return -1; + + ACE_Uring_Asynch_Read_File_Result *result = 0; + ACE_NEW_NORETURN (result, + ACE_Uring_Asynch_Read_File_Result (this->handler_proxy_, + this->handle_, + &message_block, + num_bytes_to_read, + act, + this->proactor_, + offset, + offset_high, + true, + iovec)); + if (result == 0) + { + delete [] iovec; + errno = ENOMEM; + return -1; + } + + ACE_UINT64 const full_offset = + (static_cast (offset_high) << 32) | offset; + + ACE_GUARD_RETURN (ACE_Thread_Mutex, ace_mon, this->uring_proactor_->sq_mutex 
(), -1); + struct io_uring_sqe *const sqe = this->uring_proactor_->get_sqe (); + if (!sqe) + { + delete result; + errno = EAGAIN; + return -1; + } + + ::io_uring_prep_readv (sqe, + this->handle_, + iovec, + iovec_count, + full_offset); + ::io_uring_sqe_set_data (sqe, result); + return this->queue_result (result); +} + +int +ACE_Uring_Asynch_Read_File::readv (ACE_Message_Block &message_block, + size_t num_bytes_to_read, + const void *act, + int priority, + int signal_number) +{ + return this->readv (message_block, + num_bytes_to_read, + 0, + 0, + act, + priority, + signal_number); +} + +ACE_Uring_Asynch_Read_File_Result::ACE_Uring_Asynch_Read_File_Result + (const ACE_Handler::Proxy_Ptr &handler_proxy, + ACE_HANDLE handle, + ACE_Message_Block *message_block, + size_t bytes_to_read, + const void *act, + ACE_Proactor *proactor, + u_long offset, + u_long offset_high, + bool vectored, + struct iovec *iovec) + : ACE_Uring_Asynch_Read_Stream_Result (handler_proxy, + handle, + message_block, + bytes_to_read, + act, + proactor, + offset, + offset_high, + vectored, + iovec) +{ +} + +void +ACE_Uring_Asynch_Read_File_Result::complete (size_t bytes_transferred, + int success, + const void *, + u_long error) +{ + this->bytes_transferred_ = bytes_transferred; + this->error_ = error; + if (success && this->message_block_ != 0) + { + if (this->vectored_) + ace_uring_advance_read_chain (this->message_block_, bytes_transferred); + else + this->message_block_->wr_ptr (bytes_transferred); + } + + ACE_Handler *const handler = this->handler (); + if (handler != 0) + { + ACE_Asynch_Read_File::Result result (this); + handler->handle_read_file (result); + } + delete this; +} + +// --------------------------------------------------------------------------- +// Write Stream and File Impl +// --------------------------------------------------------------------------- + +ACE_Uring_Asynch_Write_Stream_Result::ACE_Uring_Asynch_Write_Stream_Result + (const ACE_Handler::Proxy_Ptr &handler_proxy, + 
ACE_HANDLE handle, + ACE_Message_Block *message_block, + size_t bytes_to_write, + const void *act, + ACE_Proactor *proactor, + u_long offset, + u_long offset_high, + bool vectored, + struct iovec *iovec) + : ACE_Uring_Asynch_Result (handler_proxy, + act, + handle, + offset, + offset_high, + proactor), + message_block_ (message_block), + bytes_to_write_ (bytes_to_write), + vectored_ (vectored), + iovec_ (iovec) +{ +} + +ACE_Uring_Asynch_Write_Stream_Result::~ACE_Uring_Asynch_Write_Stream_Result (void) +{ + delete [] this->iovec_; +} + +size_t +ACE_Uring_Asynch_Write_Stream_Result::bytes_to_write (void) const +{ + return this->bytes_to_write_; +} + +ACE_Message_Block & +ACE_Uring_Asynch_Write_Stream_Result::message_block (void) const +{ + return *this->message_block_; +} + +ACE_HANDLE +ACE_Uring_Asynch_Write_Stream_Result::handle (void) const +{ + return this->handle_; +} + +void +ACE_Uring_Asynch_Write_Stream_Result::complete (size_t bytes_transferred, + int success, + const void *, + u_long error) +{ + this->bytes_transferred_ = bytes_transferred; + this->error_ = error; + if (success && this->message_block_ != 0) + { + if (this->vectored_) + ace_uring_advance_write_chain (this->message_block_, bytes_transferred); + else + this->message_block_->rd_ptr (bytes_transferred); + } + + ACE_Handler *const handler = this->handler (); + if (handler != 0) + { + ACE_Asynch_Write_Stream::Result result (this); + handler->handle_write_stream (result); + } + delete this; +} + +ACE_Uring_Asynch_Write_Stream::ACE_Uring_Asynch_Write_Stream + (ACE_Uring_Proactor *proactor) + : ACE_Uring_Asynch_Operation (proactor) +{ +} + +int +ACE_Uring_Asynch_Write_Stream::write (ACE_Message_Block &message_block, + size_t bytes_to_write, + const void *act, + int, + int) +{ + size_t const length = message_block.length (); + if (bytes_to_write > length) bytes_to_write = length; + + ACE_Uring_Asynch_Write_Stream_Result *result = 0; + ACE_NEW_RETURN (result, + ACE_Uring_Asynch_Write_Stream_Result 
(this->handler_proxy_, + this->handle_, + &message_block, + bytes_to_write, + act, + this->proactor_), + -1); + + ACE_GUARD_RETURN (ACE_Thread_Mutex, ace_mon, this->uring_proactor_->sq_mutex (), -1); + struct io_uring_sqe *const sqe = this->uring_proactor_->get_sqe (); + if (!sqe) + { + delete result; + errno = EAGAIN; + return -1; + } + + ::io_uring_prep_write (sqe, + this->handle_, + message_block.rd_ptr (), + (unsigned int) bytes_to_write, + 0); + ::io_uring_sqe_set_data (sqe, result); + return this->queue_result (result); +} + +int +ACE_Uring_Asynch_Write_Stream::writev (ACE_Message_Block &message_block, + size_t bytes_to_write, + const void *act, + int, + int) +{ + struct iovec *iovec = 0; + unsigned int iovec_count = 0; + if (ace_uring_build_iovecs (&message_block, + bytes_to_write, + true, + iovec, + iovec_count, + bytes_to_write) == -1) + return -1; + + ACE_Uring_Asynch_Write_Stream_Result *result = 0; + ACE_NEW_NORETURN (result, + ACE_Uring_Asynch_Write_Stream_Result (this->handler_proxy_, + this->handle_, + &message_block, + bytes_to_write, + act, + this->proactor_, + 0, + 0, + true, + iovec)); + if (result == 0) + { + delete [] iovec; + errno = ENOMEM; + return -1; + } + + ACE_GUARD_RETURN (ACE_Thread_Mutex, ace_mon, this->uring_proactor_->sq_mutex (), -1); + struct io_uring_sqe *const sqe = this->uring_proactor_->get_sqe (); + if (!sqe) + { + delete result; + errno = EAGAIN; + return -1; + } + + ::io_uring_prep_writev (sqe, + this->handle_, + iovec, + iovec_count, + 0); + ::io_uring_sqe_set_data (sqe, result); + return this->queue_result (result); +} + +ACE_Uring_Asynch_Write_File::ACE_Uring_Asynch_Write_File + (ACE_Uring_Proactor *proactor) + : ACE_Uring_Asynch_Write_Stream (proactor) +{ +} + +int +ACE_Uring_Asynch_Write_File::write (ACE_Message_Block &message_block, + size_t bytes_to_write, + u_long offset, + u_long offset_high, + const void *act, + int, + int) +{ + size_t const length = message_block.length (); + if (bytes_to_write > length) 
bytes_to_write = length; + + ACE_Uring_Asynch_Write_File_Result *result = 0; + ACE_NEW_RETURN (result, + ACE_Uring_Asynch_Write_File_Result (this->handler_proxy_, + this->handle_, + &message_block, + bytes_to_write, + act, + this->proactor_, + offset, + offset_high), + -1); + + ACE_UINT64 const full_offset = + (static_cast (offset_high) << 32) | offset; + + ACE_GUARD_RETURN (ACE_Thread_Mutex, ace_mon, this->uring_proactor_->sq_mutex (), -1); + struct io_uring_sqe *const sqe = this->uring_proactor_->get_sqe (); + if (!sqe) + { + delete result; + errno = EAGAIN; + return -1; + } + + ::io_uring_prep_write (sqe, + this->handle_, + message_block.rd_ptr (), + (unsigned int) bytes_to_write, + full_offset); + ::io_uring_sqe_set_data (sqe, result); + return this->queue_result (result); +} + +int +ACE_Uring_Asynch_Write_File::write (ACE_Message_Block &message_block, + size_t bytes_to_write, + const void *act, + int, + int) +{ + size_t const length = message_block.length (); + if (bytes_to_write > length) bytes_to_write = length; + + ACE_Uring_Asynch_Write_File_Result *result = 0; + ACE_NEW_RETURN (result, + ACE_Uring_Asynch_Write_File_Result (this->handler_proxy_, + this->handle_, + &message_block, + bytes_to_write, + act, + this->proactor_, + 0, + 0), + -1); + + ACE_GUARD_RETURN (ACE_Thread_Mutex, ace_mon, this->uring_proactor_->sq_mutex (), -1); + struct io_uring_sqe *const sqe = this->uring_proactor_->get_sqe (); + if (!sqe) + { + delete result; + errno = EAGAIN; + return -1; + } + + ::io_uring_prep_write (sqe, + this->handle_, + message_block.rd_ptr (), + (unsigned int) bytes_to_write, + 0); + ::io_uring_sqe_set_data (sqe, result); + return this->queue_result (result); +} + +int +ACE_Uring_Asynch_Write_File::writev (ACE_Message_Block &message_block, + size_t bytes_to_write, + u_long offset, + u_long offset_high, + const void *act, + int, + int) +{ + struct iovec *iovec = 0; + unsigned int iovec_count = 0; + if (ace_uring_build_iovecs (&message_block, + bytes_to_write, + 
true, + iovec, + iovec_count, + bytes_to_write) == -1) + return -1; + + ACE_Uring_Asynch_Write_File_Result *result = 0; + ACE_NEW_NORETURN (result, + ACE_Uring_Asynch_Write_File_Result (this->handler_proxy_, + this->handle_, + &message_block, + bytes_to_write, + act, + this->proactor_, + offset, + offset_high, + true, + iovec)); + if (result == 0) + { + delete [] iovec; + errno = ENOMEM; + return -1; + } + + ACE_UINT64 const full_offset = + (static_cast (offset_high) << 32) | offset; + + ACE_GUARD_RETURN (ACE_Thread_Mutex, ace_mon, this->uring_proactor_->sq_mutex (), -1); + struct io_uring_sqe *const sqe = this->uring_proactor_->get_sqe (); + if (!sqe) + { + delete result; + errno = EAGAIN; + return -1; + } + + ::io_uring_prep_writev (sqe, + this->handle_, + iovec, + iovec_count, + full_offset); + ::io_uring_sqe_set_data (sqe, result); + return this->queue_result (result); +} + +int +ACE_Uring_Asynch_Write_File::writev (ACE_Message_Block &message_block, + size_t bytes_to_write, + const void *act, + int priority, + int signal_number) +{ + return this->writev (message_block, + bytes_to_write, + 0, + 0, + act, + priority, + signal_number); +} + +ACE_Uring_Asynch_Write_File_Result::ACE_Uring_Asynch_Write_File_Result + (const ACE_Handler::Proxy_Ptr &handler_proxy, + ACE_HANDLE handle, + ACE_Message_Block *message_block, + size_t bytes_to_write, + const void *act, + ACE_Proactor *proactor, + u_long offset, + u_long offset_high, + bool vectored, + struct iovec *iovec) + : ACE_Uring_Asynch_Write_Stream_Result (handler_proxy, + handle, + message_block, + bytes_to_write, + act, + proactor, + offset, + offset_high, + vectored, + iovec) +{ +} + +void +ACE_Uring_Asynch_Write_File_Result::complete (size_t bytes_transferred, + int success, + const void *, + u_long error) +{ + this->bytes_transferred_ = bytes_transferred; + this->error_ = error; + if (success && this->message_block_ != 0) + { + if (this->vectored_) + ace_uring_advance_write_chain (this->message_block_, 
bytes_transferred); + else + this->message_block_->rd_ptr (bytes_transferred); + } + + ACE_Handler *const handler = this->handler (); + if (handler != 0) + { + ACE_Asynch_Write_File::Result result (this); + handler->handle_write_file (result); + } + delete this; +} + +// --------------------------------------------------------------------------- +// Accept Impl +// --------------------------------------------------------------------------- + +ACE_Uring_Asynch_Accept_Result::ACE_Uring_Asynch_Accept_Result + (const ACE_Handler::Proxy_Ptr &handler_proxy, + ACE_HANDLE listen_handle, + ACE_HANDLE accept_handle, + ACE_Message_Block *message_block, + size_t bytes_to_read, + const void *act, + ACE_Proactor *proactor) + : ACE_Uring_Asynch_Result (handler_proxy, + act, + listen_handle, + 0, + 0, + proactor), + accept_handle_ (accept_handle), + message_block_ (message_block), + bytes_to_read_ (bytes_to_read), + addr_len_ (sizeof (struct sockaddr_storage)) +{ +} + +ACE_HANDLE +ACE_Uring_Asynch_Accept_Result::accept_handle (void) const +{ + return this->accept_handle_; +} + +ACE_Message_Block & +ACE_Uring_Asynch_Accept_Result::message_block (void) const +{ + return *this->message_block_; +} + +ACE_HANDLE +ACE_Uring_Asynch_Accept_Result::listen_handle (void) const +{ + return this->handle_; +} + +size_t +ACE_Uring_Asynch_Accept_Result::bytes_to_read (void) const +{ + return this->bytes_to_read_; +} + +struct sockaddr * +ACE_Uring_Asynch_Accept_Result::addr (void) +{ + return reinterpret_cast (&this->client_addr_); +} + +socklen_t * +ACE_Uring_Asynch_Accept_Result::addrlen (void) +{ + return &this->addr_len_; +} + +void +ACE_Uring_Asynch_Accept_Result::complete (size_t bytes_transferred, + int success, + const void *, + u_long error) +{ + // io_uring returns the accepted fd in cqe->res for accept operations. + // Preserve that in accept_handle() and report zero transferred bytes, + // matching ACE's accept result contract. 
+ this->bytes_transferred_ = 0; + this->error_ = error; + if (success) + this->accept_handle_ = (ACE_HANDLE) bytes_transferred; + + ACE_Handler *const handler = this->handler (); + if (handler != 0) + { + ACE_Asynch_Accept::Result result (this); + handler->handle_accept (result); + } + delete this; +} + +ACE_Uring_Asynch_Accept::ACE_Uring_Asynch_Accept + (ACE_Uring_Proactor *proactor) + : ACE_Uring_Asynch_Operation (proactor) +{ +} + +int +ACE_Uring_Asynch_Accept::accept (ACE_Message_Block &message_block, + size_t bytes_to_read, + ACE_HANDLE accept_handle, + const void *act, + int, + int, + int) +{ + ACE_Uring_Asynch_Accept_Result *result = 0; + ACE_NEW_RETURN (result, + ACE_Uring_Asynch_Accept_Result (this->handler_proxy_, + this->handle_, + accept_handle, + &message_block, + bytes_to_read, + act, + this->proactor_), + -1); + + ACE_GUARD_RETURN (ACE_Thread_Mutex, ace_mon, this->uring_proactor_->sq_mutex (), -1); + struct io_uring_sqe *const sqe = this->uring_proactor_->get_sqe (); + if (!sqe) + { + delete result; + errno = EAGAIN; + return -1; + } + + ::io_uring_prep_accept (sqe, this->handle_, result->addr (), result->addrlen (), 0); + ::io_uring_sqe_set_data (sqe, result); + return this->submit_result (result); +} + +// --------------------------------------------------------------------------- +// Connect Impl +// --------------------------------------------------------------------------- + +ACE_Uring_Asynch_Connect_Result::ACE_Uring_Asynch_Connect_Result ( + const ACE_Handler::Proxy_Ptr &handler_proxy, + ACE_HANDLE connect_handle, + const void *act, + ACE_Proactor *proactor) + : ACE_Uring_Asynch_Result (handler_proxy, + act, + connect_handle, + 0, + 0, + proactor) + , connect_handle_ (connect_handle) +{ +} + +void +ACE_Uring_Asynch_Connect_Result::complete (size_t bytes_transferred, + int /*success*/, + const void *, + u_long error) +{ + this->bytes_transferred_ = bytes_transferred; + this->error_ = error; + ACE_Handler *const handler = this->handler (); + if 
(handler != 0) + { + ACE_Asynch_Connect::Result result (this); + handler->handle_connect (result); + } + delete this; +} + +ACE_HANDLE ACE_Uring_Asynch_Connect_Result::connect_handle (void) const +{ + return this->connect_handle_; +} + +void +ACE_Uring_Asynch_Connect_Result::connect_handle (ACE_HANDLE handle) +{ + this->connect_handle_ = handle; +} + +ACE_Uring_Asynch_Connect::ACE_Uring_Asynch_Connect (ACE_Uring_Proactor *proactor) + : ACE_Uring_Asynch_Operation (proactor) +{ +} + +int +ACE_Uring_Asynch_Connect::open (const ACE_Handler::Proxy_Ptr &handler_proxy, + ACE_HANDLE handle, + const void *completion_key, + ACE_Proactor *proactor) +{ + // Call the base class but ignore failure when handle is INVALID_HANDLE: + // the connect socket is not known yet, it will be created per-connect call. + ACE_Uring_Asynch_Operation::open (handler_proxy, handle, completion_key, proactor); + this->handler_proxy_ = handler_proxy; + this->proactor_ = proactor; + return 0; +} + +int +ACE_Uring_Asynch_Connect::connect (ACE_HANDLE connect_handle, + const ACE_Addr &remote_sap, + const ACE_Addr &local_sap, + int reuse_addr, + const void *act, + int, + int) +{ + ACE_Uring_Asynch_Connect_Result *result = 0; + ACE_NEW_RETURN (result, + ACE_Uring_Asynch_Connect_Result (this->handler_proxy_, + connect_handle, + act, + this->proactor_), + -1); + + ACE_HANDLE handle = result->connect_handle (); + bool created_handle = false; + bool success = false; + + if (handle == ACE_INVALID_HANDLE) + { + int const protocol_family = remote_sap.get_type (); + + handle = ACE_OS::socket (protocol_family, + SOCK_STREAM, + 0); + // Save it. + result->connect_handle (handle); + connect_handle = handle; + created_handle = (handle != ACE_INVALID_HANDLE); + if (handle == ACE_INVALID_HANDLE) + { + result->set_error (errno); + ACELIB_ERROR ((LM_ERROR, + ACE_TEXT ("ACE_Uring_Asynch_Connect::connect_i: %p\n"), + ACE_TEXT ("socket"))); + } + else + { + // Reuse the address. 
+ int const one = 1; + if (protocol_family != PF_UNIX && + reuse_addr != 0 && + ACE_OS::setsockopt (handle, + SOL_SOCKET, + SO_REUSEADDR, + (const char *) &one, + sizeof one) == -1) + { + result->set_error (errno); + ACELIB_ERROR ((LM_ERROR, + ACE_TEXT ("ACE_Uring_Asynch_Connect::connect_i: %p\n"), + ACE_TEXT ("setsockopt"))); + } + } + } + + if (result->error () == 0 && local_sap != ACE_Addr::sap_any) + { + sockaddr *const laddr = reinterpret_cast (local_sap.get_addr ()); + size_t const size = local_sap.get_size (); + + if (ACE_OS::bind (handle, laddr, size) == -1) + { + result->set_error (errno); + ACELIB_ERROR ((LM_ERROR, + ACE_TEXT ("ACE_Uring_Asynch_Connect::connect_i: %p\n"), + ACE_TEXT ("bind"))); + } + } + + // Set non-blocking mode. + if (result->error () == 0 && ACE::set_flags (handle, ACE_NONBLOCK) != 0) + { + result->set_error (errno); + ACELIB_ERROR ((LM_ERROR, + ACE_TEXT ("ACE_Uring_Asynch_Connect::connect_i: %p\n"), + ACE_TEXT ("set_flags"))); + } + + if (result->error () == 0) + { + ACE_GUARD_RETURN (ACE_Thread_Mutex, ace_mon, this->uring_proactor_->sq_mutex (), -1); + struct io_uring_sqe *const sqe = this->uring_proactor_->get_sqe (); + if (!sqe) + { + result->set_error (EAGAIN); + errno = EAGAIN; + } + else + { + ::io_uring_prep_connect (sqe, + connect_handle, + (struct sockaddr *) remote_sap.get_addr (), + remote_sap.get_size ()); + ::io_uring_sqe_set_data (sqe, result); + this->register_result (result); + int const submit_result = this->uring_proactor_->submit_sqe (); + if (submit_result < 0) + { + this->unregister_result (result); + errno = -submit_result; + result->set_error (errno); + } + else + { + success = true; + } + } + } + + if (success) + return 0; + + if (created_handle && handle != ACE_INVALID_HANDLE) + { + ACE_OS::closesocket (handle); + result->connect_handle (ACE_INVALID_HANDLE); + } + delete result; + return -1; +} + +// --------------------------------------------------------------------------- +// Datagram Impl +// 
--------------------------------------------------------------------------- + +ACE_Uring_Asynch_Read_Dgram_Result::ACE_Uring_Asynch_Read_Dgram_Result ( + const ACE_Handler::Proxy_Ptr &handler_proxy, + ACE_HANDLE handle, + ACE_Message_Block *message_block, + size_t bytes_to_read, + int flags, + int protocol_family, + const void *act, + ACE_Proactor *proactor) + : ACE_Uring_Asynch_Result (handler_proxy, + act, + handle, + 0, + 0, + proactor) + , message_block_ (message_block) + , bytes_to_read_ (bytes_to_read) + , flags_ (flags) +{ + ACE_OS::memset (&this->msg_, 0, sizeof (this->msg_)); + this->iov_.iov_base = message_block->wr_ptr (); + this->iov_.iov_len = (unsigned int)bytes_to_read; + this->msg_.msg_iov = &this->iov_; + this->msg_.msg_iovlen = 1; + this->msg_.msg_name = &this->remote_addr_; + this->msg_.msg_namelen = sizeof (this->remote_addr_); + ACE_UNUSED_ARG (protocol_family); +} + +ACE_Message_Block * +ACE_Uring_Asynch_Read_Dgram_Result::message_block (void) const +{ + return this->message_block_; +} + +void +ACE_Uring_Asynch_Read_Dgram_Result::complete (size_t bytes_transferred, + int success, + const void *, + u_long error) +{ + this->bytes_transferred_ = bytes_transferred; + this->error_ = error; + if (success && this->message_block_ != 0) + this->message_block_->wr_ptr (bytes_transferred); + + ACE_Handler *const handler = this->handler (); + if (handler != 0) + { + ACE_Asynch_Read_Dgram::Result result (this); + handler->handle_read_dgram (result); + } + delete this; +} + +int +ACE_Uring_Asynch_Read_Dgram_Result::remote_address (ACE_Addr &addr) const +{ + ACE_OS::memcpy (addr.get_addr (), &this->remote_addr_, this->msg_.msg_namelen); + addr.set_size (this->msg_.msg_namelen); + return 0; +} + +int +ACE_Uring_Asynch_Read_Dgram_Result::flags (void) const +{ + return this->flags_; +} + +ACE_HANDLE +ACE_Uring_Asynch_Read_Dgram_Result::handle (void) const +{ + return this->handle_; +} + +size_t +ACE_Uring_Asynch_Read_Dgram_Result::bytes_to_read (void) const +{ 
+ return this->bytes_to_read_; +} + +struct msghdr * +ACE_Uring_Asynch_Read_Dgram_Result::msg (void) +{ + return &this->msg_; +} + +ACE_Uring_Asynch_Read_Dgram::ACE_Uring_Asynch_Read_Dgram (ACE_Uring_Proactor *proactor) + : ACE_Uring_Asynch_Operation (proactor) +{ +} + +ssize_t +ACE_Uring_Asynch_Read_Dgram::recv (ACE_Message_Block *message_block, + size_t &/*number_of_bytes_recvd*/, + int flags, + int protocol_family, + const void *act, + int, + int) +{ + ACE_Uring_Asynch_Read_Dgram_Result *result = 0; + ACE_NEW_RETURN (result, + ACE_Uring_Asynch_Read_Dgram_Result (this->handler_proxy_, + this->handle_, + message_block, + message_block->space (), + flags, + protocol_family, + act, + this->proactor_), + -1); + ACE_GUARD_RETURN (ACE_Thread_Mutex, ace_mon, this->uring_proactor_->sq_mutex (), -1); + struct io_uring_sqe *const sqe = this->uring_proactor_->get_sqe (); + if (!sqe) + { + delete result; + errno = EAGAIN; + return -1; + } + + ::io_uring_prep_recvmsg (sqe, this->handle_, result->msg (), flags); + ::io_uring_sqe_set_data (sqe, result); + return this->submit_result (result); +} + +ACE_Uring_Asynch_Write_Dgram_Result::ACE_Uring_Asynch_Write_Dgram_Result ( + const ACE_Handler::Proxy_Ptr &handler_proxy, + ACE_HANDLE handle, + ACE_Message_Block *message_block, + size_t bytes_to_write, + int flags, + const void *act, + ACE_Proactor *proactor) + : ACE_Uring_Asynch_Result (handler_proxy, + act, + handle, + 0, + 0, + proactor) + , message_block_ (message_block) + , bytes_to_write_ (bytes_to_write) + , flags_ (flags) + , remote_addr_len_ (0) +{ + ACE_OS::memset (&this->msg_, 0, sizeof (this->msg_)); + ACE_OS::memset (&this->remote_addr_, 0, sizeof (this->remote_addr_)); + this->iov_.iov_base = message_block->rd_ptr (); + this->iov_.iov_len = (unsigned int)bytes_to_write; + this->msg_.msg_iov = &this->iov_; + this->msg_.msg_iovlen = 1; +} + +ACE_Message_Block * +ACE_Uring_Asynch_Write_Dgram_Result::message_block (void) const +{ + return this->message_block_; +} + +void 
+ACE_Uring_Asynch_Write_Dgram_Result::complete (size_t bytes_transferred, + int success, + const void *, + u_long error) +{ + this->bytes_transferred_ = bytes_transferred; + this->error_ = error; + if (success && this->message_block_ != 0) + this->message_block_->rd_ptr (bytes_transferred); + + ACE_Handler *const handler = this->handler (); + if (handler != 0) + { + ACE_Asynch_Write_Dgram::Result result (this); + handler->handle_write_dgram (result); + } + delete this; +} + +int +ACE_Uring_Asynch_Write_Dgram_Result::flags (void) const +{ + return this->flags_; +} + +ACE_HANDLE +ACE_Uring_Asynch_Write_Dgram_Result::handle (void) const +{ + return this->handle_; +} + +size_t +ACE_Uring_Asynch_Write_Dgram_Result::bytes_to_write (void) const +{ + return this->bytes_to_write_; +} + +struct msghdr * +ACE_Uring_Asynch_Write_Dgram_Result::msg (void) +{ + return &this->msg_; +} + +int +ACE_Uring_Asynch_Write_Dgram_Result::remote_address (const ACE_Addr &addr) +{ + if (addr.get_addr () == 0 + || addr.get_size () == 0 + || static_cast (addr.get_size ()) > sizeof (this->remote_addr_)) + { + errno = EINVAL; + return -1; + } + + ACE_OS::memcpy (&this->remote_addr_, addr.get_addr (), addr.get_size ()); + this->remote_addr_len_ = static_cast (addr.get_size ()); + this->msg_.msg_name = &this->remote_addr_; + this->msg_.msg_namelen = this->remote_addr_len_; + return 0; +} + +ACE_Uring_Asynch_Write_Dgram::ACE_Uring_Asynch_Write_Dgram (ACE_Uring_Proactor *proactor) + : ACE_Uring_Asynch_Operation (proactor) +{ +} + +ssize_t +ACE_Uring_Asynch_Write_Dgram::send (ACE_Message_Block *message_block, + size_t &/*number_of_bytes_sent*/, + int flags, + const ACE_Addr &remote_addr, + const void *act, + int, + int) +{ + ACE_Uring_Asynch_Write_Dgram_Result *result = 0; + ACE_NEW_RETURN (result, + ACE_Uring_Asynch_Write_Dgram_Result (this->handler_proxy_, + this->handle_, + message_block, + message_block->length (), + flags, + act, + this->proactor_), + -1); + if (result->remote_address 
(remote_addr) != 0) + { + delete result; + return -1; + } + + ACE_GUARD_RETURN (ACE_Thread_Mutex, ace_mon, this->uring_proactor_->sq_mutex (), -1); + struct io_uring_sqe *const sqe = this->uring_proactor_->get_sqe (); + if (!sqe) + { + delete result; + errno = EAGAIN; + return -1; + } + + ::io_uring_prep_sendmsg (sqe, this->handle_, result->msg (), flags); + ::io_uring_sqe_set_data (sqe, result); + return this->submit_result (result); +} + +// --------------------------------------------------------------------------- +// Transmit File Impl +// --------------------------------------------------------------------------- + +ACE_Uring_Asynch_Transmit_File_Result::ACE_Uring_Asynch_Transmit_File_Result ( + const ACE_Handler::Proxy_Ptr &handler_proxy, + ACE_HANDLE socket, + ACE_HANDLE file, + ACE_Asynch_Transmit_File::Header_And_Trailer *header_and_trailer, + size_t bytes_to_write, + u_long offset, + u_long offset_high, + size_t bytes_per_send, + u_long flags, + const void *act, + ACE_Proactor *proactor) + : ACE_Uring_Asynch_Result (handler_proxy, + act, + socket, + offset, + offset_high, + proactor) + , file_ (file) + , header_and_trailer_ (header_and_trailer) + , bytes_to_write_ (bytes_to_write) + , bytes_per_send_ (bytes_per_send) + , flags_ (flags) +{ +} + +ACE_HANDLE +ACE_Uring_Asynch_Transmit_File_Result::socket (void) const +{ + return this->handle_; +} + +ACE_HANDLE +ACE_Uring_Asynch_Transmit_File_Result::file (void) const +{ + return this->file_; +} + +ACE_Asynch_Transmit_File::Header_And_Trailer * +ACE_Uring_Asynch_Transmit_File_Result::header_and_trailer (void) const +{ + return this->header_and_trailer_; +} + +size_t +ACE_Uring_Asynch_Transmit_File_Result::bytes_to_write (void) const +{ + return this->bytes_to_write_; +} + +size_t +ACE_Uring_Asynch_Transmit_File_Result::bytes_per_send (void) const +{ + return this->bytes_per_send_; +} + +u_long +ACE_Uring_Asynch_Transmit_File_Result::flags (void) const +{ + return this->flags_; +} + +void 
+ACE_Uring_Asynch_Transmit_File_Result::complete (size_t bytes_transferred, + int /*success*/, + const void *, + u_long error) +{ + this->bytes_transferred_ = bytes_transferred; + this->error_ = error; + ACE_Handler *const handler = this->handler (); + if (handler != 0) + { + ACE_Asynch_Transmit_File::Result result (this); + handler->handle_transmit_file (result); + } + delete this; +} + +ACE_Uring_Asynch_Transmit_File::ACE_Uring_Asynch_Transmit_File (ACE_Uring_Proactor *proactor) + : ACE_Uring_Asynch_Operation (proactor) +{ +} + +int +ACE_Uring_Asynch_Transmit_File::transmit_file ( + ACE_HANDLE file, + ACE_Asynch_Transmit_File::Header_And_Trailer *header_and_trailer, + size_t bytes_to_write, + u_long offset, + u_long offset_high, + size_t bytes_per_send, + u_long flags, + const void *act, + int, + int) +{ + ACE_UNUSED_ARG (file); + ACE_UNUSED_ARG (header_and_trailer); + ACE_UNUSED_ARG (bytes_to_write); + ACE_UNUSED_ARG (offset); + ACE_UNUSED_ARG (offset_high); + ACE_UNUSED_ARG (bytes_per_send); + ACE_UNUSED_ARG (flags); + ACE_UNUSED_ARG (act); + + // The previous implementation used splice(file -> socket), which is not + // valid on Linux because one splice endpoint must be a pipe. Fail fast + // until a real async transmit-file strategy is implemented for io_uring. + errno = ENOTSUP; + return -1; +} + +ACE_END_VERSIONED_NAMESPACE_DECL + +#endif /* ACE_HAS_AIO_CALLS && ACE_HAS_IO_URING */ diff --git a/ACE/ace/Uring_Asynch_IO.h b/ACE/ace/Uring_Asynch_IO.h new file mode 100644 index 0000000000000..81bf34e7814ad --- /dev/null +++ b/ACE/ace/Uring_Asynch_IO.h @@ -0,0 +1,906 @@ +// -*- C++ -*- + +//============================================================================= +/** + * @file Uring_Asynch_IO.h + * + * The implementation classes for the Linux io_uring asynchronous + * operations are defined here in this file. 
+ */ +//============================================================================= + +#ifndef ACE_URING_ASYNCH_IO_H +#define ACE_URING_ASYNCH_IO_H + +#include /**/ "ace/pre.h" + +#include /**/ "ace/config-all.h" + +#if !defined (ACE_LACKS_PRAGMA_ONCE) +#pragma once +#endif /* ACE_LACKS_PRAGMA_ONCE */ + +#if defined (ACE_HAS_AIO_CALLS) && defined (ACE_HAS_IO_URING) + +#include "ace/Asynch_IO_Impl.h" +#include "ace/Atomic_Op.h" +#include "ace/Guard_T.h" +#include "ace/Time_Value.h" +#include "ace/Unbounded_Set.h" +#include "ace/Uring_Proactor.h" + +#include +#include + +ACE_BEGIN_VERSIONED_NAMESPACE_DECL + +class ACE_Uring_Asynch_Operation; + +/** + * @class ACE_Uring_Asynch_Result + * + * @brief Base class for io_uring completion tokens. + * + * Concrete asynchronous result types derive from this class and adapt + * an io_uring completion back into the corresponding ACE result + * callback. + */ +class ACE_Export ACE_Uring_Asynch_Result + : public virtual ACE_Asynch_Result_Impl +{ +public: + /// Initialize the shared completion state for an io_uring result. + ACE_Uring_Asynch_Result (const ACE_Handler::Proxy_Ptr &handler_proxy, + const void *act, + ACE_HANDLE handle, + u_long offset, + u_long offset_high, + ACE_Proactor *proactor); + + /// Release any resources retained by the result wrapper. + virtual ~ACE_Uring_Asynch_Result (void); + + /// Dispatch the completed operation to the concrete ACE handler hook. + virtual void complete (size_t bytes_transferred, + int success, + const void *completion_key, + u_long error = 0) = 0; + + /// Return the number of bytes reported by the completion. + virtual size_t bytes_transferred (void) const; + + /// Store the number of bytes reported by the completion. + virtual void set_bytes_transferred (size_t n); + + /// Return the translated completion error code. + virtual u_long error (void) const; + + /// Store the translated completion error code. 
+ virtual void set_error (u_long err); + + /// Return the user-supplied asynchronous completion token. + virtual const void *act (void) const; + + /// Return non-zero if the operation completed successfully. + virtual int success (void) const; + + /// Return the completion key associated with the handler. + virtual const void *completion_key (void) const; + + /// Return the event handle associated with the result, if any. + virtual ACE_HANDLE event (void) const; + + /// Return the low 32 bits of the operation file offset. + virtual u_long offset (void) const; + + /// Return the high 32 bits of the operation file offset. + virtual u_long offset_high (void) const; + + /// Return the completion priority stored with the result. + virtual int priority (void) const; + + /// Return the signal number stored with the result. + virtual int signal_number (void) const; + + /// Requeue the completion through the Proactor. + virtual int post_completion (ACE_Proactor_Impl *proactor); + + /// Return the original handler associated with the result. + ACE_Handler *handler (void) const; + + /// Return the handler that should receive completion dispatch. + ACE_Handler *dispatch_handler (void) const; + + /// Record the operation that currently owns this result. + void owner (ACE_Uring_Asynch_Operation *operation); + + /// Return the operation that currently owns this result. + ACE_Uring_Asynch_Operation *owner (void) const; + +protected: + ACE_Handler *handler_; + ACE_Handler::Proxy_Ptr handler_proxy_; + const void *act_; + ACE_HANDLE handle_; + u_long offset_; + u_long offset_high_; + ACE_Proactor *proactor_; + size_t bytes_transferred_; + u_long error_; + ACE_Atomic_Op owner_; +}; + +/** + * @class ACE_Uring_Asynch_Timer + * + * @brief Result object used for timer completions posted into the ring. + */ +class ACE_Export ACE_Uring_Asynch_Timer : public ACE_Uring_Asynch_Result +{ +public: + /// Create a timer completion result for the specified timeout. 
+  ACE_Uring_Asynch_Timer (const ACE_Handler::Proxy_Ptr &handler_proxy,
+                          const void *act,
+                          const ACE_Time_Value &tv,
+                          ACE_Proactor *proactor);
+
+  /// Deliver the timer expiration callback.
+  virtual void complete (size_t bytes_transferred,
+                         int success,
+                         const void *completion_key,
+                         u_long error = 0);
+
+private:
+  /// Timer expiration time (presumably a copy of the @a tv constructor
+  /// argument -- implementation not in view, confirm in the .cpp).
+  ACE_Time_Value time_;
+};
+
+/**
+ * @class ACE_Uring_Asynch_Operation
+ *
+ * @brief Base class for io_uring-backed asynchronous operations.
+ *
+ * Holds the handler/handle/Proactor binding shared by the concrete
+ * operation classes below and tracks the set of in-flight results so
+ * that cancel() can act on them as a group.
+ */
+class ACE_Export ACE_Uring_Asynch_Operation
+  : public virtual ACE_Asynch_Operation_Impl
+{
+public:
+  /// Associate the operation with its handler, handle, and Proactor.
+  virtual int open (const ACE_Handler::Proxy_Ptr &handler_proxy,
+                    ACE_HANDLE handle,
+                    const void *completion_key,
+                    ACE_Proactor *proactor);
+
+  /// Cancel any results currently tracked by this operation.
+  virtual int cancel (void);
+
+  /// Return the owning Proactor.
+  virtual ACE_Proactor *proactor (void) const;
+
+  /// Return the handler currently bound to the operation.
+  ACE_Handler *handler (void);
+
+  /// Queue a completion for later submission to the Proactor.
+  int queue_result (ACE_Uring_Asynch_Result *result);
+
+  /// Submit a completion immediately to the Proactor.
+  int submit_result (ACE_Uring_Asynch_Result *result);
+
+  /// Add a result to the set of in-flight operations.
+  void register_result (ACE_Uring_Asynch_Result *result);
+
+  /// Remove a result from the set of in-flight operations.
+  void unregister_result (ACE_Uring_Asynch_Result *result);
+
+protected:
+  /// Construct an operation helper bound to an io_uring Proactor.
+  ACE_Uring_Asynch_Operation (ACE_Uring_Proactor *proactor);
+
+  /// Destroy the operation helper after all results have been released.
+  virtual ~ACE_Uring_Asynch_Operation (void);
+
+  /// Concrete io_uring Proactor this operation submits to.
+  ACE_Uring_Proactor *uring_proactor_;
+  /// Owning Proactor as the generic interface type (see proactor()).
+  ACE_Proactor *proactor_;
+  /// Handler that receives completion callbacks.
+  ACE_Handler::Proxy_Ptr handler_proxy_;
+  /// I/O handle this operation was opened on.
+  ACE_HANDLE handle_;
+  /// Serializes access to pending_results_.
+  ACE_Thread_Mutex pending_results_lock_;
+  /// In-flight results added via register_result().
+  /// NOTE(review): the template argument appears to have been stripped in
+  /// extraction; presumably ACE_Unbounded_Set<ACE_Uring_Asynch_Result *>
+  /// -- confirm against the repository.
+  ACE_Unbounded_Set pending_results_;
+};
+
+// ---------------------------------------------------------------------------
+// Read stream and file
+// ---------------------------------------------------------------------------
+
+/**
+ * @class ACE_Uring_Asynch_Read_Stream_Result
+ *
+ * @brief io_uring implementation of the read stream/file result.
+ */
+class ACE_Export ACE_Uring_Asynch_Read_Stream_Result
+  : public ACE_Uring_Asynch_Result,
+    public ACE_Asynch_Read_File_Result_Impl
+{
+public:
+  /// Create a stream-read result wrapper for stream and file reads.
+  ACE_Uring_Asynch_Read_Stream_Result (const ACE_Handler::Proxy_Ptr &handler_proxy,
+                                       ACE_HANDLE handle,
+                                       ACE_Message_Block *message_block,
+                                       size_t bytes_to_read,
+                                       const void *act,
+                                       ACE_Proactor *proactor,
+                                       u_long offset = 0,
+                                       u_long offset_high = 0,
+                                       bool vectored = false,
+                                       struct iovec *iovec = 0);
+
+  /// Release any temporary vectored-I/O state.
+  virtual ~ACE_Uring_Asynch_Read_Stream_Result (void);
+
+  /// Deliver the read completion callback.
+  virtual void complete (size_t bytes_transferred,
+                         int success,
+                         const void *completion_key,
+                         u_long error = 0);
+
+  /// Return the requested transfer size.
+  virtual size_t bytes_to_read (void) const;
+
+  /// Return the message block used for the transfer.
+  virtual ACE_Message_Block &message_block (void) const;
+
+  /// Return the handle associated with the transfer.
+  virtual ACE_HANDLE handle (void) const;
+
+protected:
+  /// Destination message block for the read.
+  ACE_Message_Block *message_block_;
+  /// Number of bytes requested.
+  size_t bytes_to_read_;
+  /// True when the request was issued as a vectored (readv) transfer.
+  bool vectored_;
+  /// Scatter list for vectored transfers (released in the destructor).
+  struct iovec *iovec_;
+};
+
+/**
+ * @class ACE_Uring_Asynch_Read_Stream
+ *
+ * @brief io_uring implementation of asynchronous stream reads.
+ */
+class ACE_Export ACE_Uring_Asynch_Read_Stream
+  : public ACE_Uring_Asynch_Operation,
+    public virtual ACE_Asynch_Read_Stream_Impl
+{
+public:
+  /// Construct a stream-read initiator for the given Proactor.
+  ACE_Uring_Asynch_Read_Stream (ACE_Uring_Proactor *proactor);
+
+  /// Start a single-buffer asynchronous stream read.
+  virtual int read (ACE_Message_Block &message_block,
+                    size_t num_bytes_to_read,
+                    const void *act,
+                    int priority,
+                    int signal_number);
+
+  /// Start a vectored asynchronous stream read.
+  virtual int readv (ACE_Message_Block &message_block,
+                     size_t num_bytes_to_read,
+                     const void *act,
+                     int priority,
+                     int signal_number);
+};
+
+/**
+ * @class ACE_Uring_Asynch_Read_File
+ *
+ * @brief io_uring implementation of asynchronous file reads.
+ */
+class ACE_Export ACE_Uring_Asynch_Read_File
+  : public ACE_Uring_Asynch_Read_Stream,
+    public ACE_Asynch_Read_File_Impl
+{
+public:
+  /// Construct a file-read initiator for the given Proactor.
+  ACE_Uring_Asynch_Read_File (ACE_Uring_Proactor *proactor);
+
+  /// Start a positioned file read.
+  virtual int read (ACE_Message_Block &message_block,
+                    size_t num_bytes_to_read,
+                    u_long offset,
+                    u_long offset_high,
+                    const void *act,
+                    int priority,
+                    int signal_number);
+
+  /// Start a sequential file read using the current file position.
+  virtual int read (ACE_Message_Block &message_block,
+                    size_t num_bytes_to_read,
+                    const void *act,
+                    int priority,
+                    int signal_number);
+
+  /// Start a positioned vectored file read.
+  virtual int readv (ACE_Message_Block &message_block,
+                     size_t num_bytes_to_read,
+                     u_long offset,
+                     u_long offset_high,
+                     const void *act,
+                     int priority,
+                     int signal_number);
+
+  /// Start a sequential vectored file read.
+  virtual int readv (ACE_Message_Block &message_block,
+                     size_t num_bytes_to_read,
+                     const void *act,
+                     int priority,
+                     int signal_number);
+};
+
+/**
+ * @class ACE_Uring_Asynch_Read_File_Result
+ *
+ * @brief io_uring implementation of the file-read result.
+ */
+class ACE_Export ACE_Uring_Asynch_Read_File_Result
+  : public ACE_Uring_Asynch_Read_Stream_Result
+{
+public:
+  /// Create a file-read result wrapper.
+  ACE_Uring_Asynch_Read_File_Result (const ACE_Handler::Proxy_Ptr &handler_proxy,
+                                     ACE_HANDLE handle,
+                                     ACE_Message_Block *message_block,
+                                     size_t bytes_to_read,
+                                     const void *act,
+                                     ACE_Proactor *proactor,
+                                     u_long offset = 0,
+                                     u_long offset_high = 0,
+                                     bool vectored = false,
+                                     struct iovec *iovec = 0);
+
+  /// Deliver the file-read completion callback.
+  virtual void complete (size_t bytes_transferred,
+                         int success,
+                         const void *completion_key,
+                         u_long error = 0);
+};
+
+// ---------------------------------------------------------------------------
+// Write stream and file
+// ---------------------------------------------------------------------------
+
+/**
+ * @class ACE_Uring_Asynch_Write_Stream_Result
+ *
+ * @brief io_uring implementation of the write stream/file result.
+ */
+class ACE_Export ACE_Uring_Asynch_Write_Stream_Result
+  : public ACE_Uring_Asynch_Result,
+    public ACE_Asynch_Write_File_Result_Impl
+{
+public:
+  /// Create a stream-write result wrapper for stream and file writes.
+  ACE_Uring_Asynch_Write_Stream_Result
+    (const ACE_Handler::Proxy_Ptr &handler_proxy,
+     ACE_HANDLE handle,
+     ACE_Message_Block *message_block,
+     size_t bytes_to_write,
+     const void *act,
+     ACE_Proactor *proactor,
+     u_long offset = 0,
+     u_long offset_high = 0,
+     bool vectored = false,
+     struct iovec *iovec = 0);
+
+  /// Release any temporary vectored-I/O state.
+  virtual ~ACE_Uring_Asynch_Write_Stream_Result (void);
+
+  /// Deliver the write completion callback.
+  virtual void complete (size_t bytes_transferred,
+                         int success,
+                         const void *completion_key,
+                         u_long error = 0);
+
+  /// Return the requested transfer size.
+  virtual size_t bytes_to_write (void) const;
+
+  /// Return the message block used for the transfer.
+  virtual ACE_Message_Block &message_block (void) const;
+
+  /// Return the handle associated with the transfer.
+  virtual ACE_HANDLE handle (void) const;
+
+protected:
+  /// Source message block for the write.
+  ACE_Message_Block *message_block_;
+  /// Number of bytes requested.
+  size_t bytes_to_write_;
+  /// True when the request was issued as a vectored (writev) transfer.
+  bool vectored_;
+  /// Gather list for vectored transfers (released in the destructor).
+  struct iovec *iovec_;
+};
+
+/**
+ * @class ACE_Uring_Asynch_Write_Stream
+ *
+ * @brief io_uring implementation of asynchronous stream writes.
+ */
+class ACE_Export ACE_Uring_Asynch_Write_Stream
+  : public ACE_Uring_Asynch_Operation,
+    public virtual ACE_Asynch_Write_Stream_Impl
+{
+public:
+  /// Construct a stream-write initiator for the given Proactor.
+  ACE_Uring_Asynch_Write_Stream (ACE_Uring_Proactor *proactor);
+
+  /// Start a single-buffer asynchronous stream write.
+  virtual int write (ACE_Message_Block &message_block,
+                     size_t bytes_to_write,
+                     const void *act,
+                     int priority,
+                     int signal_number);
+
+  /// Start a vectored asynchronous stream write.
+  virtual int writev (ACE_Message_Block &message_block,
+                      size_t bytes_to_write,
+                      const void *act,
+                      int priority,
+                      int signal_number);
+};
+
+/**
+ * @class ACE_Uring_Asynch_Write_File
+ *
+ * @brief io_uring implementation of asynchronous file writes.
+ */
+class ACE_Export ACE_Uring_Asynch_Write_File
+  : public ACE_Uring_Asynch_Write_Stream,
+    public ACE_Asynch_Write_File_Impl
+{
+public:
+  /// Construct a file-write initiator for the given Proactor.
+  ACE_Uring_Asynch_Write_File (ACE_Uring_Proactor *proactor);
+
+  /// Start a positioned file write.
+  virtual int write (ACE_Message_Block &message_block,
+                     size_t bytes_to_write,
+                     u_long offset,
+                     u_long offset_high,
+                     const void *act,
+                     int priority,
+                     int signal_number);
+
+  /// Start a sequential file write using the current file position.
+  virtual int write (ACE_Message_Block &message_block,
+                     size_t bytes_to_write,
+                     const void *act,
+                     int priority,
+                     int signal_number);
+
+  /// Start a positioned vectored file write.
+  virtual int writev (ACE_Message_Block &message_block,
+                      size_t bytes_to_write,
+                      u_long offset,
+                      u_long offset_high,
+                      const void *act,
+                      int priority,
+                      int signal_number);
+
+  /// Start a sequential vectored file write.
+  virtual int writev (ACE_Message_Block &message_block,
+                      size_t bytes_to_write,
+                      const void *act,
+                      int priority,
+                      int signal_number);
+};
+
+/**
+ * @class ACE_Uring_Asynch_Write_File_Result
+ *
+ * @brief io_uring implementation of the file-write result.
+ */
+class ACE_Export ACE_Uring_Asynch_Write_File_Result
+  : public ACE_Uring_Asynch_Write_Stream_Result
+{
+public:
+  /// Create a file-write result wrapper.
+  ACE_Uring_Asynch_Write_File_Result
+    (const ACE_Handler::Proxy_Ptr &handler_proxy,
+     ACE_HANDLE handle,
+     ACE_Message_Block *message_block,
+     size_t bytes_to_write,
+     const void *act,
+     ACE_Proactor *proactor,
+     u_long offset = 0,
+     u_long offset_high = 0,
+     bool vectored = false,
+     struct iovec *iovec = 0);
+
+  /// Deliver the file-write completion callback.
+  virtual void complete (size_t bytes_transferred,
+                         int success,
+                         const void *completion_key,
+                         u_long error = 0);
+};
+
+// ---------------------------------------------------------------------------
+// Accept and connect
+// ---------------------------------------------------------------------------
+
+/**
+ * @class ACE_Uring_Asynch_Accept_Result
+ *
+ * @brief io_uring implementation of the accept result.
+ */
+class ACE_Export ACE_Uring_Asynch_Accept_Result
+  : public ACE_Uring_Asynch_Result,
+    public ACE_Asynch_Accept_Result_Impl
+{
+public:
+  /// Create an accept result wrapper for a pending accept operation.
+  ACE_Uring_Asynch_Accept_Result (const ACE_Handler::Proxy_Ptr &handler_proxy,
+                                  ACE_HANDLE listen_handle,
+                                  ACE_HANDLE accept_handle,
+                                  ACE_Message_Block *message_block,
+                                  size_t bytes_to_read,
+                                  const void *act,
+                                  ACE_Proactor *proactor);
+
+  /// Deliver the accept completion callback.
+  virtual void complete (size_t bytes_transferred,
+                         int success,
+                         const void *completion_key,
+                         u_long error = 0);
+
+  /// Return the socket accepted by the operation.
+  virtual ACE_HANDLE accept_handle (void) const;
+
+  /// Return the message block used for the optional initial read.
+  virtual ACE_Message_Block &message_block (void) const;
+
+  /// Return the listening socket on which accept was started.
+  virtual ACE_HANDLE listen_handle (void) const;
+
+  /// Return the requested size of the initial read.
+  virtual size_t bytes_to_read (void) const;
+
+  /// Return the storage used for the peer address.
+  struct sockaddr *addr (void);
+
+  /// Return the length field used with the peer address storage.
+  socklen_t *addrlen (void);
+
+private:
+  /// Socket produced by the accept.
+  ACE_HANDLE accept_handle_;
+  /// Buffer for the optional initial read.
+  ACE_Message_Block *message_block_;
+  /// Requested initial-read size.
+  size_t bytes_to_read_;
+  /// Peer address storage exposed via addr().
+  struct sockaddr_storage client_addr_;
+  /// Peer address length exposed via addrlen().
+  socklen_t addr_len_;
+};
+
+/**
+ * @class ACE_Uring_Asynch_Accept
+ *
+ * @brief io_uring implementation of asynchronous accepts.
+ */
+class ACE_Export ACE_Uring_Asynch_Accept
+  : public ACE_Uring_Asynch_Operation,
+    public ACE_Asynch_Accept_Impl
+{
+public:
+  /// Construct an accept initiator for the given Proactor.
+  ACE_Uring_Asynch_Accept (ACE_Uring_Proactor *proactor);
+
+  /// Start an asynchronous accept operation.
+  virtual int accept (ACE_Message_Block &message_block,
+                      size_t bytes_to_read,
+                      ACE_HANDLE accept_handle,
+                      const void *act,
+                      int priority,
+                      int signal_number,
+                      int addr_family);
+};
+
+/**
+ * @class ACE_Uring_Asynch_Connect_Result
+ *
+ * @brief io_uring implementation of the connect result.
+ */
+class ACE_Export ACE_Uring_Asynch_Connect_Result
+  : public ACE_Uring_Asynch_Result,
+    public ACE_Asynch_Connect_Result_Impl
+{
+public:
+  /// Create a connect result wrapper.
+  ACE_Uring_Asynch_Connect_Result (const ACE_Handler::Proxy_Ptr &handler_proxy,
+                                   ACE_HANDLE connect_handle,
+                                   const void *act,
+                                   ACE_Proactor *proactor);
+
+  /// Deliver the connect completion callback.
+  virtual void complete (size_t bytes_transferred,
+                         int success,
+                         const void *completion_key,
+                         u_long error = 0);
+
+  /// Return the socket associated with the connection attempt.
+  virtual ACE_HANDLE connect_handle (void) const;
+
+  /// Update the socket associated with the connection attempt.
+  void connect_handle (ACE_HANDLE handle);
+
+private:
+  /// Socket for the connection attempt (mutable via the setter above).
+  ACE_HANDLE connect_handle_;
+};
+
+/**
+ * @class ACE_Uring_Asynch_Connect
+ *
+ * @brief io_uring implementation of asynchronous connects.
+ */
+class ACE_Export ACE_Uring_Asynch_Connect
+  : public ACE_Uring_Asynch_Operation,
+    public ACE_Asynch_Connect_Impl
+{
+public:
+  /// Construct a connect initiator for the given Proactor.
+  ACE_Uring_Asynch_Connect (ACE_Uring_Proactor *proactor);
+
+  /// Bind the initiator to a handler, handle, and Proactor.
+  virtual int open (const ACE_Handler::Proxy_Ptr &handler_proxy,
+                    ACE_HANDLE handle,
+                    const void *completion_key,
+                    ACE_Proactor *proactor);
+
+  /// Start an asynchronous connect operation.
+  virtual int connect (ACE_HANDLE connect_handle,
+                       const ACE_Addr &remote_sap,
+                       const ACE_Addr &local_sap,
+                       int reuse_addr,
+                       const void *act,
+                       int priority,
+                       int signal_number);
+};
+
+// ---------------------------------------------------------------------------
+// Datagram (UDP)
+// ---------------------------------------------------------------------------
+
+/**
+ * @class ACE_Uring_Asynch_Read_Dgram_Result
+ *
+ * @brief io_uring implementation of the datagram read result.
+ */
+class ACE_Export ACE_Uring_Asynch_Read_Dgram_Result
+  : public ACE_Uring_Asynch_Result,
+    public ACE_Asynch_Read_Dgram_Result_Impl
+{
+public:
+  /// Create a datagram-read result wrapper.
+  ACE_Uring_Asynch_Read_Dgram_Result
+    (const ACE_Handler::Proxy_Ptr &handler_proxy,
+     ACE_HANDLE handle,
+     ACE_Message_Block *message_block,
+     size_t bytes_to_read,
+     int flags,
+     int protocol_family,
+     const void *act,
+     ACE_Proactor *proactor);
+
+  /// Deliver the datagram-read completion callback.
+  virtual void complete (size_t bytes_transferred,
+                         int success,
+                         const void *completion_key,
+                         u_long error = 0);
+
+  /// Return the message block used for the transfer.
+  virtual ACE_Message_Block *message_block (void) const;
+
+  /// Copy out the remote peer address captured for the datagram.
+  virtual int remote_address (ACE_Addr &addr) const;
+
+  /// Return the receive flags stored with the result.
+  virtual int flags (void) const;
+
+  /// Return the socket handle used for the transfer.
+  virtual ACE_HANDLE handle (void) const;
+
+  /// Return the requested size of the datagram read.
+  virtual size_t bytes_to_read (void) const;
+
+  /// Return the msghdr used to submit the receive request.
+  struct msghdr *msg (void);
+
+private:
+  /// Destination message block for the datagram.
+  ACE_Message_Block *message_block_;
+  /// Requested receive size.
+  size_t bytes_to_read_;
+  /// recvmsg-style flags stored with the result.
+  int flags_;
+  /// msghdr exposed via msg() for the receive submission.
+  struct msghdr msg_;
+  /// Single-element scatter list referenced by msg_.
+  struct iovec iov_;
+  /// Storage for the captured peer address (see remote_address()).
+  struct sockaddr_storage remote_addr_;
+};
+
+/**
+ * @class ACE_Uring_Asynch_Read_Dgram
+ *
+ * @brief io_uring implementation of asynchronous datagram receives.
+ */
+class ACE_Export ACE_Uring_Asynch_Read_Dgram
+  : public ACE_Uring_Asynch_Operation,
+    public ACE_Asynch_Read_Dgram_Impl
+{
+public:
+  /// Construct a datagram-receive initiator for the given Proactor.
+  ACE_Uring_Asynch_Read_Dgram (ACE_Uring_Proactor *proactor);
+
+  /// Start an asynchronous datagram receive.
+  virtual ssize_t recv (ACE_Message_Block *message_block,
+                        size_t &number_of_bytes_recvd,
+                        int flags,
+                        int protocol_family,
+                        const void *act,
+                        int priority,
+                        int signal_number);
+};
+
+/**
+ * @class ACE_Uring_Asynch_Write_Dgram_Result
+ *
+ * @brief io_uring implementation of the datagram write result.
+ */
+class ACE_Export ACE_Uring_Asynch_Write_Dgram_Result
+  : public ACE_Uring_Asynch_Result,
+    public ACE_Asynch_Write_Dgram_Result_Impl
+{
+public:
+  /// Create a datagram-write result wrapper.
+  ACE_Uring_Asynch_Write_Dgram_Result
+    (const ACE_Handler::Proxy_Ptr &handler_proxy,
+     ACE_HANDLE handle,
+     ACE_Message_Block *message_block,
+     size_t bytes_to_write,
+     int flags,
+     const void *act,
+     ACE_Proactor *proactor);
+
+  /// Deliver the datagram-write completion callback.
+  virtual void complete (size_t bytes_transferred,
+                         int success,
+                         const void *completion_key,
+                         u_long error = 0);
+
+  /// Return the message block used for the transfer.
+  virtual ACE_Message_Block *message_block (void) const;
+
+  /// Return the send flags stored with the result.
+  virtual int flags (void) const;
+
+  /// Return the socket handle used for the transfer.
+  virtual ACE_HANDLE handle (void) const;
+
+  /// Return the requested size of the datagram write.
+  virtual size_t bytes_to_write (void) const;
+
+  /// Return the msghdr used to submit the send request.
+  struct msghdr *msg (void);
+
+  /// Store the destination address for the datagram send.
+  int remote_address (const ACE_Addr &addr);
+
+private:
+  /// Source message block for the datagram.
+  ACE_Message_Block *message_block_;
+  /// Requested send size.
+  size_t bytes_to_write_;
+  /// sendmsg-style flags stored with the result.
+  int flags_;
+  /// msghdr exposed via msg() for the send submission.
+  struct msghdr msg_;
+  /// Single-element gather list referenced by msg_.
+  struct iovec iov_;
+  /// Destination address set via remote_address().
+  struct sockaddr_storage remote_addr_;
+  /// Length of the stored destination address.
+  socklen_t remote_addr_len_;
+};
+
+/**
+ * @class ACE_Uring_Asynch_Write_Dgram
+ *
+ * @brief io_uring implementation of asynchronous datagram sends.
+ */
+class ACE_Export ACE_Uring_Asynch_Write_Dgram
+  : public ACE_Uring_Asynch_Operation,
+    public ACE_Asynch_Write_Dgram_Impl
+{
+public:
+  /// Construct a datagram-send initiator for the given Proactor.
+  ACE_Uring_Asynch_Write_Dgram (ACE_Uring_Proactor *proactor);
+
+  /// Start an asynchronous datagram send.
+  virtual ssize_t send (ACE_Message_Block *message_block,
+                        size_t &number_of_bytes_sent,
+                        int flags,
+                        const ACE_Addr &remote_addr,
+                        const void *act,
+                        int priority,
+                        int signal_number);
+};
+
+// ---------------------------------------------------------------------------
+// Transmit file
+// ---------------------------------------------------------------------------
+
+/**
+ * @class ACE_Uring_Asynch_Transmit_File_Result
+ *
+ * @brief io_uring implementation of the transmit file result.
+ */
+class ACE_Export ACE_Uring_Asynch_Transmit_File_Result
+  : public ACE_Uring_Asynch_Result,
+    public ACE_Asynch_Transmit_File_Result_Impl
+{
+public:
+  /// Create a transmit-file result wrapper.
+  ACE_Uring_Asynch_Transmit_File_Result
+    (const ACE_Handler::Proxy_Ptr &handler_proxy,
+     ACE_HANDLE socket,
+     ACE_HANDLE file,
+     ACE_Asynch_Transmit_File::Header_And_Trailer *header_and_trailer,
+     size_t bytes_to_write,
+     u_long offset,
+     u_long offset_high,
+     size_t bytes_per_send,
+     u_long flags,
+     const void *act,
+     ACE_Proactor *proactor);
+
+  /// Deliver the transmit-file completion callback.
+  virtual void complete (size_t bytes_transferred,
+                         int success,
+                         const void *completion_key,
+                         u_long error = 0);
+
+  /// Return the socket receiving the transmitted data.
+  virtual ACE_HANDLE socket (void) const;
+
+  /// Return the file supplying the transmitted data.
+  virtual ACE_HANDLE file (void) const;
+
+  /// Return the optional header and trailer buffers.
+  virtual ACE_Asynch_Transmit_File::Header_And_Trailer *
+    header_and_trailer (void) const;
+
+  /// Return the requested file byte count.
+  virtual size_t bytes_to_write (void) const;
+
+  /// Return the per-send throttling limit.
+  virtual size_t bytes_per_send (void) const;
+
+  /// Return the transmit-file flags.
+  virtual u_long flags (void) const;
+
+private:
+  /// File supplying the transmitted data.
+  ACE_HANDLE file_;
+  /// Optional header/trailer buffers sent around the file data.
+  ACE_Asynch_Transmit_File::Header_And_Trailer *header_and_trailer_;
+  /// Requested file byte count.
+  size_t bytes_to_write_;
+  /// Per-send throttling limit.
+  size_t bytes_per_send_;
+  /// Transmit-file flags.
+  u_long flags_;
+};
+
+/**
+ * @class ACE_Uring_Asynch_Transmit_File
+ *
+ * @brief io_uring implementation of asynchronous transmit-file requests.
+ */
+class ACE_Export ACE_Uring_Asynch_Transmit_File
+  : public ACE_Uring_Asynch_Operation,
+    public ACE_Asynch_Transmit_File_Impl
+{
+public:
+  /// Construct a transmit-file initiator for the given Proactor.
+  ACE_Uring_Asynch_Transmit_File (ACE_Uring_Proactor *proactor);
+
+  /// Start an asynchronous transmit-file request.
+  virtual int transmit_file
+    (ACE_HANDLE file,
+     ACE_Asynch_Transmit_File::Header_And_Trailer *header_and_trailer,
+     size_t bytes_to_write,
+     u_long offset,
+     u_long offset_high,
+     size_t bytes_per_send,
+     u_long flags,
+     const void *act,
+     int priority,
+     int signal_number);
+};
+
+ACE_END_VERSIONED_NAMESPACE_DECL
+
+#endif /* ACE_HAS_AIO_CALLS && ACE_HAS_IO_URING */
+
+#include /**/ "ace/post.h"
+#endif /* ACE_URING_ASYNCH_IO_H */
diff --git a/ACE/ace/Uring_Proactor.cpp b/ACE/ace/Uring_Proactor.cpp
new file mode 100644
index 0000000000000..1cd4ba3d1624e
--- /dev/null
+++ b/ACE/ace/Uring_Proactor.cpp
@@ -0,0 +1,724 @@
+//=============================================================================
+/**
+ * @file Uring_Proactor.cpp
+ *
+ * The Linux io_uring Proactor implementation.
+ */ +//============================================================================= + +#include "Uring_Proactor.h" + +#if defined (ACE_HAS_AIO_CALLS) && defined (ACE_HAS_IO_URING) + +#include "Uring_Asynch_IO.h" +#include "ace/Countdown_Time.h" +#include "ace/Log_Category.h" +#include "ace/OS_NS_errno.h" +#include "ace/OS_NS_sys_time.h" + +#include +#include +#include + +ACE_BEGIN_VERSIONED_NAMESPACE_DECL + +namespace +{ + enum + { + ACE_URING_SUBMIT_WAKE_USER_DATA = 1 + }; + + unsigned + queued_sqes (const io_uring &ring) + { + // For non-SQPOLL rings, khead tracks how many SQEs the kernel has + // consumed from user space, so sqe_tail - *khead is the number of + // prepared but unsubmitted/unconsumed entries. For SQPOLL, defer to + // liburing's helper because it uses the right acquire semantics. + if (ring.flags & IORING_SETUP_SQPOLL) + return ::io_uring_sq_ready (&ring); + + return ring.sq.sqe_tail - *ring.sq.khead; + } +} + +ACE_Uring_Proactor::ACE_Uring_Proactor (size_t entries) + : ring_ () + , is_initialized_ (false) + , submit_signal_pending_ (false) + , submit_wakeup_handle_ (ACE_INVALID_HANDLE) + , dispatch_thread_id_ () +{ + ACE_TRACE ("ACE_Uring_Proactor::ACE_Uring_Proactor"); + + int const ret = ::io_uring_queue_init (entries, &this->ring_, 0); + if (ret < 0) + { + errno = -ret; + ACELIB_ERROR ((LM_ERROR, + ACE_TEXT ("%p\n"), + ACE_TEXT ("ACE_Uring_Proactor::io_uring_queue_init"))); + } + else + { + this->submit_wakeup_handle_ = + ::eventfd (0, EFD_CLOEXEC | EFD_NONBLOCK); + if (this->submit_wakeup_handle_ == ACE_INVALID_HANDLE) + { + ACELIB_ERROR ((LM_ERROR, + ACE_TEXT ("%p\n"), + ACE_TEXT ("ACE_Uring_Proactor::eventfd"))); + ::io_uring_queue_exit (&this->ring_); + } + else + { + ACE_GUARD (ACE_Thread_Mutex, sq_guard, this->sq_mutex_); + int const submit_result = + this->arm_submit_wakeup_locked () == -1 + ? -1 + : ::io_uring_submit (&this->ring_); + if (submit_result < 0) + { + errno = submit_result == -1 ? 
errno : -submit_result; + ACELIB_ERROR ((LM_ERROR, + ACE_TEXT ("%p\n"), + ACE_TEXT ("ACE_Uring_Proactor::arm_submit_wakeup_locked"))); + ::close (this->submit_wakeup_handle_); + this->submit_wakeup_handle_ = ACE_INVALID_HANDLE; + ::io_uring_queue_exit (&this->ring_); + } + else + { + this->is_initialized_ = true; + } + } + } +} + +ACE_Uring_Proactor::~ACE_Uring_Proactor (void) +{ + ACE_TRACE ("ACE_Uring_Proactor::~ACE_Uring_Proactor"); + this->close (); +} + +int +ACE_Uring_Proactor::close (void) +{ + ACE_GUARD_RETURN (ACE_Thread_Mutex, sq_guard, this->sq_mutex_, -1); + ACE_GUARD_RETURN (ACE_Thread_Mutex, cq_guard, this->cq_mutex_, -1); + + if (!this->is_initialized_) + return 0; + + this->is_initialized_ = false; + ::io_uring_queue_exit (&this->ring_); + if (this->submit_wakeup_handle_ != ACE_INVALID_HANDLE) + { + ::close (this->submit_wakeup_handle_); + this->submit_wakeup_handle_ = ACE_INVALID_HANDLE; + } + return 0; +} + +int +ACE_Uring_Proactor::register_handle (ACE_HANDLE, const void *) +{ + return 0; +} + +int +ACE_Uring_Proactor::handle_events (ACE_Time_Value &wait_time) +{ + ACE_Countdown_Time countdown (&wait_time); + int const result = this->process_cqes (DEFAULT_CQE_BATCH_SIZE, &wait_time); + return result > 0 ? 1 : result; +} + +int +ACE_Uring_Proactor::handle_events (void) +{ + int const result = this->process_cqes (DEFAULT_CQE_BATCH_SIZE); + return result > 0 ? 
1 : result; +} + +int +ACE_Uring_Proactor::wake_up_dispatch_threads (void) +{ + return 0; +} + +int +ACE_Uring_Proactor::close_dispatch_threads (int) +{ + return 0; +} + +size_t +ACE_Uring_Proactor::number_of_threads (void) const +{ + return 1; +} + +void +ACE_Uring_Proactor::number_of_threads (size_t) +{ +} + +ACE_HANDLE +ACE_Uring_Proactor::get_handle (void) const +{ + return ACE_INVALID_HANDLE; +} + +int +ACE_Uring_Proactor::process_cqes (int max_to_process, const ACE_Time_Value *wait_time) +{ + ACE_GUARD_RETURN (ACE_Thread_Mutex, dispatch_guard, this->dispatch_mutex_, -1); + + if (!this->is_initialized_) + return -1; + + this->dispatch_thread_id_ = ACE_OS::thr_self (); + + if (max_to_process < 1) + max_to_process = 1; + + int processed = 0; + + while (processed < max_to_process) + { + if (processed == 0) + { + ACE_GUARD_RETURN (ACE_Thread_Mutex, sq_guard, this->sq_mutex_, -1); + int const submit_result = this->submit_pending_sqe (); + if (submit_result < 0) + { + errno = -submit_result; + return -1; + } + } + + struct io_uring_cqe *cqe = 0; + ACE_Uring_Asynch_Result *result = 0; + ACE_Uring_Asynch_Operation *owner = 0; + size_t bytes_transferred = 0; + int error = 0; + int ret = 0; + bool const should_wait = (processed == 0); + + { + ACE_GUARD_RETURN (ACE_Thread_Mutex, guard, this->cq_mutex_, -1); + + if (!this->is_initialized_) + return -1; + + if (should_wait) + { + if (wait_time != 0) + { + if (*wait_time == ACE_Time_Value::zero) + ret = ::io_uring_peek_cqe (&this->ring_, &cqe); + else + { + ACE_Time_Value const local_wait_time = *wait_time; + struct __kernel_timespec timeout; + timeout.tv_sec = local_wait_time.sec (); + timeout.tv_nsec = local_wait_time.usec () * 1000; + ret = ::io_uring_wait_cqe_timeout (&this->ring_, + &cqe, + &timeout); + } + } + else + { + ret = ::io_uring_wait_cqe (&this->ring_, &cqe); + } + } + else + { + ret = ::io_uring_peek_cqe (&this->ring_, &cqe); + } + + if (ret < 0) + { + if (ret == -ETIME || ret == -EAGAIN || ret == -EINTR) + 
return processed; + + errno = -ret; + return -1; + } + + uintptr_t const data = ::io_uring_cqe_get_data64 (cqe); + if (data == ACE_URING_SUBMIT_WAKE_USER_DATA) + result = reinterpret_cast (ACE_URING_SUBMIT_WAKE_USER_DATA); + else + result = reinterpret_cast (data); + error = (cqe->res < 0) ? -cqe->res : 0; + bytes_transferred = (cqe->res > 0) ? cqe->res : 0; + ::io_uring_cqe_seen (&this->ring_, cqe); + } + + { + ACE_GUARD_RETURN (ACE_Thread_Mutex, sq_guard, this->sq_mutex_, -1); + + if (result == reinterpret_cast (ACE_URING_SUBMIT_WAKE_USER_DATA)) + { + this->drain_submit_wakeup_locked (); + if (this->arm_submit_wakeup_locked () == -1) + return -1; + + int const submit_result = this->submit_pending_sqe (); + if (submit_result < 0) + { + errno = -submit_result; + return -1; + } + } + else if (result != 0) + { + owner = result->owner (); + if (owner != 0) + owner->unregister_result (result); + } + } + + if (result == reinterpret_cast (ACE_URING_SUBMIT_WAKE_USER_DATA)) + continue; + + ++processed; + + if (result == 0) + continue; + + result->complete (bytes_transferred, + error == 0 ? 
1 : 0, + 0, // No completion key + error); + } + + return processed; +} + +ACE_Thread_Mutex & +ACE_Uring_Proactor::sq_mutex (void) +{ + return this->sq_mutex_; +} + +bool +ACE_Uring_Proactor::is_initialized (void) const +{ + return this->is_initialized_; +} + +struct io_uring_sqe * +ACE_Uring_Proactor::get_sqe (void) +{ + if (!this->is_initialized_) + return 0; + return ::io_uring_get_sqe (&this->ring_); +} + +int +ACE_Uring_Proactor::submit_sqe (void) +{ + if (!this->is_initialized_) + return -1; + return ::io_uring_submit (&this->ring_); +} + +int +ACE_Uring_Proactor::submit_sqe_if_necessary (void) +{ + if (!this->is_initialized_) + return -1; + + unsigned int const ready = queued_sqes (this->ring_); + if (ready == 0) + return 0; + + if (ready < DEFAULT_SUBMIT_BATCH_SIZE + && ready < this->ring_.sq.ring_entries) + return 0; + + return ::io_uring_submit (&this->ring_); +} + +int +ACE_Uring_Proactor::submit_pending_sqe (void) +{ + if (!this->is_initialized_) + return -1; + + if (queued_sqes (this->ring_) == 0) + return 0; + + return ::io_uring_submit (&this->ring_); +} + +int +ACE_Uring_Proactor::signal_submitter (void) +{ + if (!this->is_initialized_ || this->submit_wakeup_handle_ == ACE_INVALID_HANDLE) + return -1; + + if (this->on_dispatch_thread () || this->submit_signal_pending_) + return 0; + + uint64_t const one = 1; + ssize_t const rc = ::write (this->submit_wakeup_handle_, &one, sizeof (one)); + if (rc == static_cast (sizeof (one))) + { + this->submit_signal_pending_ = true; + return 0; + } + + return -1; +} + +int +ACE_Uring_Proactor::arm_submit_wakeup_locked (void) +{ + struct io_uring_sqe *const sqe = ::io_uring_get_sqe (&this->ring_); + if (sqe == 0) + { + errno = EAGAIN; + return -1; + } + + ::io_uring_prep_poll_add (sqe, this->submit_wakeup_handle_, POLLIN); + ::io_uring_sqe_set_data64 (sqe, ACE_URING_SUBMIT_WAKE_USER_DATA); + return 0; +} + +void +ACE_Uring_Proactor::drain_submit_wakeup_locked (void) +{ + if (this->submit_wakeup_handle_ == 
ACE_INVALID_HANDLE) + return; + + uint64_t value = 0; + while (::read (this->submit_wakeup_handle_, &value, sizeof (value)) + == static_cast (sizeof (value))) + { + } + + this->submit_signal_pending_ = false; +} + +bool +ACE_Uring_Proactor::on_dispatch_thread (void) const +{ + return ACE_OS::thr_equal (ACE_OS::thr_self (), + this->dispatch_thread_id_.value ()) != 0; +} + +ACE_Asynch_Read_Stream_Impl *ACE_Uring_Proactor::create_asynch_read_stream (void) +{ + ACE_Uring_Asynch_Read_Stream *ret = 0; + ACE_NEW_RETURN (ret, ACE_Uring_Asynch_Read_Stream (this), 0); + return ret; +} + +ACE_Asynch_Write_Stream_Impl *ACE_Uring_Proactor::create_asynch_write_stream (void) +{ + ACE_Uring_Asynch_Write_Stream *ret = 0; + ACE_NEW_RETURN (ret, ACE_Uring_Asynch_Write_Stream (this), 0); + return ret; +} + +ACE_Asynch_Read_File_Impl *ACE_Uring_Proactor::create_asynch_read_file (void) +{ + ACE_Uring_Asynch_Read_File *ret = 0; + ACE_NEW_RETURN (ret, ACE_Uring_Asynch_Read_File (this), 0); + return ret; +} + +ACE_Asynch_Write_File_Impl *ACE_Uring_Proactor::create_asynch_write_file (void) +{ + ACE_Uring_Asynch_Write_File *ret = 0; + ACE_NEW_RETURN (ret, ACE_Uring_Asynch_Write_File (this), 0); + return ret; +} + +ACE_Asynch_Accept_Impl *ACE_Uring_Proactor::create_asynch_accept (void) +{ + ACE_Uring_Asynch_Accept *ret = 0; + ACE_NEW_RETURN (ret, ACE_Uring_Asynch_Accept (this), 0); + return ret; +} + +ACE_Asynch_Connect_Impl *ACE_Uring_Proactor::create_asynch_connect (void) +{ + ACE_Uring_Asynch_Connect *ret = 0; + ACE_NEW_RETURN (ret, ACE_Uring_Asynch_Connect (this), 0); + return ret; +} + +ACE_Asynch_Transmit_File_Impl *ACE_Uring_Proactor::create_asynch_transmit_file (void) +{ + ACE_Uring_Asynch_Transmit_File *ret = 0; + ACE_NEW_RETURN (ret, ACE_Uring_Asynch_Transmit_File (this), 0); + return ret; +} + +ACE_Asynch_Read_Dgram_Impl *ACE_Uring_Proactor::create_asynch_read_dgram (void) +{ + ACE_Uring_Asynch_Read_Dgram *ret = 0; + ACE_NEW_RETURN (ret, ACE_Uring_Asynch_Read_Dgram (this), 0); + 
return ret; +} + +ACE_Asynch_Write_Dgram_Impl *ACE_Uring_Proactor::create_asynch_write_dgram (void) +{ + ACE_Uring_Asynch_Write_Dgram *ret = 0; + ACE_NEW_RETURN (ret, ACE_Uring_Asynch_Write_Dgram (this), 0); + return ret; +} + +ACE_Asynch_Read_Stream_Result_Impl * +ACE_Uring_Proactor::create_asynch_read_stream_result + (const ACE_Handler::Proxy_Ptr &handler_proxy, + ACE_HANDLE handle, + ACE_Message_Block &message_block, + size_t bytes_to_read, + const void *act, + ACE_HANDLE, + int, + int) +{ + ACE_Uring_Asynch_Read_Stream_Result *ret = 0; + ACE_NEW_RETURN (ret, + ACE_Uring_Asynch_Read_Stream_Result (handler_proxy, + handle, + &message_block, + bytes_to_read, + act, + 0), + 0); + return ret; +} + +ACE_Asynch_Write_Stream_Result_Impl * +ACE_Uring_Proactor::create_asynch_write_stream_result + (const ACE_Handler::Proxy_Ptr &handler_proxy, + ACE_HANDLE handle, + ACE_Message_Block &message_block, + size_t bytes_to_write, + const void *act, + ACE_HANDLE, + int, + int) +{ + ACE_Uring_Asynch_Write_Stream_Result *ret = 0; + ACE_NEW_RETURN (ret, + ACE_Uring_Asynch_Write_Stream_Result (handler_proxy, + handle, + &message_block, + bytes_to_write, + act, + 0), + 0); + return ret; +} + +ACE_Asynch_Read_File_Result_Impl * +ACE_Uring_Proactor::create_asynch_read_file_result + (const ACE_Handler::Proxy_Ptr &handler_proxy, + ACE_HANDLE handle, + ACE_Message_Block &message_block, + size_t bytes_to_read, + const void *act, + u_long offset, + u_long offset_high, + ACE_HANDLE, + int, + int) +{ + ACE_Uring_Asynch_Read_File_Result *ret = 0; + ACE_NEW_RETURN (ret, + ACE_Uring_Asynch_Read_File_Result (handler_proxy, + handle, + &message_block, + bytes_to_read, + act, + 0, + offset, + offset_high), + 0); + return ret; +} + +ACE_Asynch_Write_File_Result_Impl * +ACE_Uring_Proactor::create_asynch_write_file_result + (const ACE_Handler::Proxy_Ptr &handler, + ACE_HANDLE handle, + ACE_Message_Block &message_block, + size_t bytes_to_write, + const void *act, + u_long offset, + u_long offset_high, + 
ACE_HANDLE, + int, + int) +{ + ACE_Uring_Asynch_Write_File_Result *ret = 0; + ACE_NEW_RETURN (ret, ACE_Uring_Asynch_Write_File_Result (handler, handle, &message_block, bytes_to_write, act, 0, offset, offset_high), 0); + return ret; +} + +ACE_Asynch_Read_Dgram_Result_Impl * +ACE_Uring_Proactor::create_asynch_read_dgram_result ( + const ACE_Handler::Proxy_Ptr &handler_proxy, + ACE_HANDLE handle, + ACE_Message_Block *message_block, + size_t bytes_to_read, + int flags, + int protocol_family, + const void *act, + ACE_HANDLE, + int, + int) +{ + ACE_Uring_Asynch_Read_Dgram_Result *ret = 0; + ACE_NEW_RETURN (ret, + ACE_Uring_Asynch_Read_Dgram_Result (handler_proxy, + handle, + message_block, + bytes_to_read, + flags, + protocol_family, + act, + 0), + 0); + return ret; +} + +ACE_Asynch_Write_Dgram_Result_Impl * +ACE_Uring_Proactor::create_asynch_write_dgram_result ( + const ACE_Handler::Proxy_Ptr &handler_proxy, + ACE_HANDLE handle, + ACE_Message_Block *message_block, + size_t bytes_to_write, + int flags, + const void *act, + ACE_HANDLE, + int, + int) +{ + ACE_Uring_Asynch_Write_Dgram_Result *ret = 0; + ACE_NEW_RETURN (ret, + ACE_Uring_Asynch_Write_Dgram_Result (handler_proxy, + handle, + message_block, + bytes_to_write, + flags, + act, + 0), + 0); + return ret; +} + +ACE_Asynch_Accept_Result_Impl * +ACE_Uring_Proactor::create_asynch_accept_result ( + const ACE_Handler::Proxy_Ptr &handler_proxy, + ACE_HANDLE listen_handle, + ACE_HANDLE accept_handle, + ACE_Message_Block &message_block, + size_t bytes_to_read, + const void *act, + ACE_HANDLE, + int, + int) +{ + ACE_Uring_Asynch_Accept_Result *ret = 0; + ACE_NEW_RETURN (ret, + ACE_Uring_Asynch_Accept_Result (handler_proxy, + listen_handle, + accept_handle, + &message_block, + bytes_to_read, + act, + 0), + 0); + return ret; +} + +ACE_Asynch_Connect_Result_Impl * +ACE_Uring_Proactor::create_asynch_connect_result ( + const ACE_Handler::Proxy_Ptr &handler_proxy, + ACE_HANDLE connect_handle, + const void *act, + ACE_HANDLE, + int, 
+ int) +{ + ACE_Uring_Asynch_Connect_Result *ret = 0; + ACE_NEW_RETURN (ret, ACE_Uring_Asynch_Connect_Result (handler_proxy, connect_handle, act, 0), 0); + return ret; +} + +ACE_Asynch_Transmit_File_Result_Impl * +ACE_Uring_Proactor::create_asynch_transmit_file_result ( + const ACE_Handler::Proxy_Ptr &handler_proxy, + ACE_HANDLE socket, + ACE_HANDLE file, + ACE_Asynch_Transmit_File::Header_And_Trailer *header_and_trailer, + size_t bytes_to_write, + u_long offset, + u_long offset_high, + size_t bytes_per_send, + u_long flags, + const void *act, + ACE_HANDLE, + int, + int) +{ + ACE_Uring_Asynch_Transmit_File_Result *ret = 0; + ACE_NEW_RETURN (ret, + ACE_Uring_Asynch_Transmit_File_Result (handler_proxy, + socket, + file, + header_and_trailer, + bytes_to_write, + offset, + offset_high, + bytes_per_send, + flags, + act, + 0), + 0); + return ret; +} + +ACE_Asynch_Result_Impl * +ACE_Uring_Proactor::create_asynch_timer (const ACE_Handler::Proxy_Ptr &handler_proxy, const void *act, const ACE_Time_Value &tv, ACE_HANDLE, int, int) +{ + ACE_Uring_Asynch_Timer *ret = 0; + ACE_NEW_RETURN (ret, ACE_Uring_Asynch_Timer (handler_proxy, act, tv, 0), 0); + return ret; +} + +int +ACE_Uring_Proactor::post_wakeup_completions (int count) +{ + ACE_GUARD_RETURN (ACE_Thread_Mutex, guard, this->sq_mutex_, -1); + + if (!this->is_initialized_) + return -1; + + for (int i = 0; i < count; ++i) + { + struct io_uring_sqe *const sqe = ::io_uring_get_sqe (&this->ring_); + if (!sqe) + break; + ::io_uring_prep_nop (sqe); + ::io_uring_sqe_set_data (sqe, 0); + } + return ::io_uring_submit (&this->ring_); +} + +ACE_END_VERSIONED_NAMESPACE_DECL + +#endif /* ACE_HAS_AIO_CALLS && ACE_HAS_IO_URING */ diff --git a/ACE/ace/Uring_Proactor.h b/ACE/ace/Uring_Proactor.h new file mode 100644 index 0000000000000..38b5524e04d02 --- /dev/null +++ b/ACE/ace/Uring_Proactor.h @@ -0,0 +1,291 @@ +// -*- C++ -*- + +//============================================================================= +/** + * @file Uring_Proactor.h 
+ * + * The implementation classes for the Linux io_uring Proactor are + * defined here in this file. + */ +//============================================================================= + +#ifndef ACE_URING_PROACTOR_H +#define ACE_URING_PROACTOR_H + +#include /**/ "ace/pre.h" + +#include /**/ "ace/config-all.h" + +#if !defined (ACE_LACKS_PRAGMA_ONCE) +#pragma once +#endif /* ACE_LACKS_PRAGMA_ONCE */ + +#if defined (ACE_HAS_AIO_CALLS) && defined (ACE_HAS_IO_URING) + +#include "ace/Atomic_Op.h" +#include "ace/OS_NS_Thread.h" +#include "ace/Proactor_Impl.h" +#include "ace/Thread_Mutex.h" + +#include + +ACE_BEGIN_VERSIONED_NAMESPACE_DECL + +class ACE_Uring_Asynch_Result; + +/** + * @class ACE_Uring_Proactor + * + * @brief Linux io_uring implementation of the Proactor. + * + * This class provides the factory methods and completion dispatch + * support needed by the ACE asynchronous operation classes when the + * backend is implemented on top of Linux io_uring. + */ +class ACE_Export ACE_Uring_Proactor : public ACE_Proactor_Impl +{ +public: + /// Initialize an io_uring-backed Proactor with the requested ring depth. + ACE_Uring_Proactor (size_t entries = 256); + + /// Release the ring and any wakeup resources owned by the Proactor. + virtual ~ACE_Uring_Proactor (void); + + /// Shut down the ring and stop further completion dispatch. + virtual int close (void); + + /// Register a handle with the backend if the implementation needs it. + virtual int register_handle (ACE_HANDLE handle, + const void *completion_key); + + /// Dispatch completions until one is handled or @a wait_time expires. + virtual int handle_events (ACE_Time_Value &wait_time); + + /// Block until at least one completion is dispatched. + virtual int handle_events (void); + + /// Wake any threads blocked in completion dispatch. + virtual int wake_up_dispatch_threads (void); + + /// Request that dispatch threads exit and optionally wait for them. 
+ virtual int close_dispatch_threads (int wait); + + /// Return the number of threads currently dispatching completions. + virtual size_t number_of_threads (void) const; + + /// Set the expected number of dispatch threads. + virtual void number_of_threads (size_t threads); + + /// Return the handle used to wake blocked dispatch threads. + virtual ACE_HANDLE get_handle (void) const; + + // Methods used to create Asynch I/O factory and result objects. + + /// Create the io_uring read-stream initiator. + virtual ACE_Asynch_Read_Stream_Impl *create_asynch_read_stream (void); + + /// Create the io_uring write-stream initiator. + virtual ACE_Asynch_Write_Stream_Impl *create_asynch_write_stream (void); + + /// Create the io_uring read-file initiator. + virtual ACE_Asynch_Read_File_Impl *create_asynch_read_file (void); + + /// Create the io_uring write-file initiator. + virtual ACE_Asynch_Write_File_Impl *create_asynch_write_file (void); + + /// Create the io_uring accept initiator. + virtual ACE_Asynch_Accept_Impl *create_asynch_accept (void); + + /// Create the io_uring connect initiator. + virtual ACE_Asynch_Connect_Impl *create_asynch_connect (void); + + /// Create the io_uring transmit-file initiator. + virtual ACE_Asynch_Transmit_File_Impl *create_asynch_transmit_file (void); + + /// Create the io_uring datagram-read initiator. + virtual ACE_Asynch_Read_Dgram_Impl *create_asynch_read_dgram (void); + + /// Create the io_uring datagram-write initiator. + virtual ACE_Asynch_Write_Dgram_Impl *create_asynch_write_dgram (void); + + /// Create a read-stream result object for one pending operation. 
+ virtual ACE_Asynch_Read_Stream_Result_Impl * + create_asynch_read_stream_result (const ACE_Handler::Proxy_Ptr &handler_proxy, + ACE_HANDLE handle, + ACE_Message_Block &message_block, + size_t bytes_to_read, + const void *act, + ACE_HANDLE event = ACE_INVALID_HANDLE, + int priority = 0, + int signal_number = ACE_SIGRTMIN); + + /// Create a write-stream result object for one pending operation. + virtual ACE_Asynch_Write_Stream_Result_Impl * + create_asynch_write_stream_result (const ACE_Handler::Proxy_Ptr &handler_proxy, + ACE_HANDLE handle, + ACE_Message_Block &message_block, + size_t bytes_to_write, + const void *act, + ACE_HANDLE event = ACE_INVALID_HANDLE, + int priority = 0, + int signal_number = ACE_SIGRTMIN); + + /// Create a read-file result object for one pending operation. + virtual ACE_Asynch_Read_File_Result_Impl * + create_asynch_read_file_result (const ACE_Handler::Proxy_Ptr &handler_proxy, + ACE_HANDLE handle, + ACE_Message_Block &message_block, + size_t bytes_to_read, + const void *act, + u_long offset, + u_long offset_high, + ACE_HANDLE event = ACE_INVALID_HANDLE, + int priority = 0, + int signal_number = ACE_SIGRTMIN); + + /// Create a write-file result object for one pending operation. + virtual ACE_Asynch_Write_File_Result_Impl * + create_asynch_write_file_result (const ACE_Handler::Proxy_Ptr &handler_proxy, + ACE_HANDLE handle, + ACE_Message_Block &message_block, + size_t bytes_to_write, + const void *act, + u_long offset, + u_long offset_high, + ACE_HANDLE event = ACE_INVALID_HANDLE, + int priority = 0, + int signal_number = ACE_SIGRTMIN); + + /// Create a datagram-read result object for one pending operation. 
+ virtual ACE_Asynch_Read_Dgram_Result_Impl * + create_asynch_read_dgram_result (const ACE_Handler::Proxy_Ptr &handler_proxy, + ACE_HANDLE handle, + ACE_Message_Block *message_block, + size_t bytes_to_read, + int flags, + int protocol_family, + const void *act, + ACE_HANDLE event = ACE_INVALID_HANDLE, + int priority = 0, + int signal_number = ACE_SIGRTMIN); + + /// Create a datagram-write result object for one pending operation. + virtual ACE_Asynch_Write_Dgram_Result_Impl * + create_asynch_write_dgram_result (const ACE_Handler::Proxy_Ptr &handler_proxy, + ACE_HANDLE handle, + ACE_Message_Block *message_block, + size_t bytes_to_write, + int flags, + const void *act, + ACE_HANDLE event = ACE_INVALID_HANDLE, + int priority = 0, + int signal_number = ACE_SIGRTMIN); + + /// Create an accept result object for one pending operation. + virtual ACE_Asynch_Accept_Result_Impl * + create_asynch_accept_result (const ACE_Handler::Proxy_Ptr &handler_proxy, + ACE_HANDLE listen_handle, + ACE_HANDLE accept_handle, + ACE_Message_Block &message_block, + size_t bytes_to_read, + const void *act, + ACE_HANDLE event = ACE_INVALID_HANDLE, + int priority = 0, + int signal_number = ACE_SIGRTMIN); + + /// Create a connect result object for one pending operation. + virtual ACE_Asynch_Connect_Result_Impl * + create_asynch_connect_result (const ACE_Handler::Proxy_Ptr &handler_proxy, + ACE_HANDLE connect_handle, + const void *act, + ACE_HANDLE event = ACE_INVALID_HANDLE, + int priority = 0, + int signal_number = ACE_SIGRTMIN); + + /// Create a transmit-file result object for one pending operation. 
+ virtual ACE_Asynch_Transmit_File_Result_Impl * + create_asynch_transmit_file_result + (const ACE_Handler::Proxy_Ptr &handler_proxy, + ACE_HANDLE socket, + ACE_HANDLE file, + ACE_Asynch_Transmit_File::Header_And_Trailer *header_and_trailer, + size_t bytes_to_write, + u_long offset, + u_long offset_high, + size_t bytes_per_send, + u_long flags, + const void *act, + ACE_HANDLE event = ACE_INVALID_HANDLE, + int priority = 0, + int signal_number = ACE_SIGRTMIN); + + /// Create a timer completion result object. + virtual ACE_Asynch_Result_Impl * + create_asynch_timer (const ACE_Handler::Proxy_Ptr &handler_proxy, + const void *act, + const ACE_Time_Value &tv, + ACE_HANDLE event = ACE_INVALID_HANDLE, + int priority = 0, + int signal_number = 0); + + /// Post internal wakeup completions into the ring. + virtual int post_wakeup_completions (int how_many); + + /// Serialize SQE preparation and submission. + ACE_Thread_Mutex &sq_mutex (void); + + /// Return non-zero once the ring has been initialized successfully. + bool is_initialized (void) const; + + /// Access to the underlying ring for operation implementations. + struct io_uring_sqe *get_sqe (void); + + /// Submit all prepared SQEs to the kernel. + int submit_sqe (void); + + /// Submit prepared SQEs if the ring currently needs a submit call. + int submit_sqe_if_necessary (void); + + /// Submit queued SQEs while preserving batching behavior. + int submit_pending_sqe (void); + + /// Wake a non-dispatch thread so it can flush pending submissions. + int signal_submitter (void); + +protected: + /// Drain and dispatch up to @a max_to_process CQEs. + int process_cqes (int max_to_process, + const ACE_Time_Value *wait_time = 0); + +private: + enum + { + DEFAULT_CQE_BATCH_SIZE = 256, + DEFAULT_SUBMIT_BATCH_SIZE = 8 + }; + + /// Arm the internal wakeup eventfd while the SQ mutex is held. + int arm_submit_wakeup_locked (void); + + /// Drain pending submit wakeups while the SQ mutex is held. 
+ void drain_submit_wakeup_locked (void); + + /// Return non-zero when called from a dispatching thread. + bool on_dispatch_thread (void) const; + + struct io_uring ring_; + bool is_initialized_; + bool submit_signal_pending_; + ACE_HANDLE submit_wakeup_handle_; + ACE_Atomic_Op dispatch_thread_id_; + mutable ACE_Thread_Mutex dispatch_mutex_; + mutable ACE_Thread_Mutex sq_mutex_; + mutable ACE_Thread_Mutex cq_mutex_; +}; + +ACE_END_VERSIONED_NAMESPACE_DECL + +#endif /* ACE_HAS_AIO_CALLS && ACE_HAS_IO_URING */ + +#include /**/ "ace/post.h" +#endif /* ACE_URING_PROACTOR_H */ diff --git a/ACE/ace/WIN32_Asynch_IO.cpp b/ACE/ace/WIN32_Asynch_IO.cpp index 6a8faf87b1321..25e3800ef7b10 100644 --- a/ACE/ace/WIN32_Asynch_IO.cpp +++ b/ACE/ace/WIN32_Asynch_IO.cpp @@ -3253,9 +3253,15 @@ ACE_WIN32_Asynch_Read_Dgram_Result::ACE_WIN32_Asynch_Read_Dgram_Result ( flags_ (flags), handle_ (handle) { - ACE_ASSERT (protocol_family == PF_INET); // only supporting INET addresses + ACE_ASSERT (protocol_family == PF_INET || protocol_family == PF_INET6); // only supporting INET and INET6 addresses + +#if defined (ACE_HAS_IPV6) + if (protocol_family == PF_INET6) + ACE_NEW (remote_address_, ACE_INET_Addr (static_cast (0), ACE_IPV6_ANY)); + else +#endif /* ACE_HAS_IPV6 */ + ACE_NEW (remote_address_, ACE_INET_Addr); - ACE_NEW (remote_address_, ACE_INET_Addr); addr_len_ = remote_address_->get_size (); ACE_UNUSED_ARG (protocol_family); diff --git a/ACE/ace/ace.mpc b/ACE/ace/ace.mpc index f1e84d4019836..c6808bceaa6b7 100644 --- a/ACE/ace/ace.mpc +++ b/ACE/ace/ace.mpc @@ -279,6 +279,8 @@ project(ACE) : ace_output, acedefaults, install, other, codecs, token, svcconf, UPIPE_Acceptor.cpp UPIPE_Connector.cpp UPIPE_Stream.cpp + Uring_Asynch_IO.cpp + Uring_Proactor.cpp WFMO_Reactor.cpp WIN32_Asynch_IO.cpp WIN32_Proactor.cpp @@ -465,6 +467,11 @@ project(ACE) : ace_output, acedefaults, install, other, codecs, token, svcconf, Timer_Wheel.h Truncate.h UPIPE_Addr.h + Uring_Asynch_IO.h + Uring_Proactor.h + 
WFMO_Reactor.h + WIN32_Asynch_IO.h + WIN32_Proactor.h Value_Ptr.h Version.h Versioned_Namespace.h diff --git a/ACE/bin/MakeProjectCreator/config/acedefaults.mpb b/ACE/bin/MakeProjectCreator/config/acedefaults.mpb index 289b574179c5e..ebef5d7077e18 100644 --- a/ACE/bin/MakeProjectCreator/config/acedefaults.mpb +++ b/ACE/bin/MakeProjectCreator/config/acedefaults.mpb @@ -1,5 +1,5 @@ // -*- MPC -*- -project: ipv6, vc_warnings, build_files, test_files, svc_conf_files, ace_unicode, ace_idl_dependencies { +project: ipv6, uring, vc_warnings, build_files, test_files, svc_conf_files, ace_unicode, ace_idl_dependencies { staticflags += ACE_AS_STATIC_LIBS includes += $(ACE_ROOT) libpaths += $(ACE_ROOT)/lib diff --git a/ACE/bin/MakeProjectCreator/config/global.features b/ACE/bin/MakeProjectCreator/config/global.features index e59027e8290ed..39043f7de8ddb 100644 --- a/ACE/bin/MakeProjectCreator/config/global.features +++ b/ACE/bin/MakeProjectCreator/config/global.features @@ -38,6 +38,7 @@ bzip2 = 0 lzo1 = 0 lzo2 = 0 ipv6 = 0 +uring = 0 mfc = 0 rpc = 0 sctp = 0 diff --git a/ACE/bin/MakeProjectCreator/config/uring.mpb b/ACE/bin/MakeProjectCreator/config/uring.mpb new file mode 100644 index 0000000000000..9dde125101945 --- /dev/null +++ b/ACE/bin/MakeProjectCreator/config/uring.mpb @@ -0,0 +1,5 @@ +// -*- MPC -*- +feature(uring) { + macros += ACE_HAS_IO_URING + lit_libs += uring +} diff --git a/ACE/include/makeinclude/platform_linux.GNU b/ACE/include/makeinclude/platform_linux.GNU index 52d4a5bfc54de..b2dd1820e4fdf 100644 --- a/ACE/include/makeinclude/platform_linux.GNU +++ b/ACE/include/makeinclude/platform_linux.GNU @@ -69,6 +69,11 @@ ifeq ($(threads),1) LIBS += -lrt endif +ifeq ($(uring),1) + CPPFLAGS += -DACE_HAS_IO_URING + LIBS += -luring +endif + ifeq ($(optimize),1) SOFLAGS += -Wl,-O3 endif diff --git a/ACE/tests/.gitignore b/ACE/tests/.gitignore index 00e38997fb455..3ea879c9dd76f 100644 --- a/ACE/tests/.gitignore +++ b/ACE/tests/.gitignore @@ -179,8 +179,11 @@ 
/Priority_Buffer_Test /Priority_Reactor_Test /Priority_Task_Test +/Proactor_Contract_Test /Proactor_File_Test +/Proactor_Network_Performance_Test /Proactor_Scatter_Gather_Test +/Proactor_Stress_Test /Proactor_Test /Proactor_Test_IPV6 /Proactor_Timer_Test diff --git a/ACE/tests/Proactor_Contract_Test.cpp b/ACE/tests/Proactor_Contract_Test.cpp new file mode 100644 index 0000000000000..10ef2af76a3a4 --- /dev/null +++ b/ACE/tests/Proactor_Contract_Test.cpp @@ -0,0 +1,917 @@ +// ============================================================================ +/** + * @file Proactor_Contract_Test.cpp + * + * Focused regression coverage for common ACE Proactor contracts and + * backend-specific edge cases. + */ +// ============================================================================ + +#include "test_config.h" + +#if defined (ACE_HAS_WIN32_OVERLAPPED_IO) || defined (ACE_HAS_AIO_CALLS) + +#include "ace/Asynch_IO.h" +#include "ace/FILE_Connector.h" +#include "ace/FILE_IO.h" +#include "ace/Get_Opt.h" +#include "ace/INET_Addr.h" +#include "ace/Log_Category.h" +#include "ace/Message_Block.h" +#include "ace/OS_NS_dirent.h" +#include "ace/OS_NS_errno.h" +#include "ace/OS_NS_fcntl.h" +#include "ace/OS_NS_sys_socket.h" +#include "ace/OS_NS_unistd.h" +#include "ace/Proactor.h" +#include "ace/SOCK_Acceptor.h" +#include "ace/SOCK_Connector.h" +#include "ace/SOCK_Stream.h" +#include "ace/Time_Value.h" +#include "Proactor_Test_Backend.h" + +namespace +{ + Proactor_Test_Backend::Type proactor_type = Proactor_Test_Backend::BACKEND_DEFAULT; + + class Library_Error_Mask_Guard + { + public: + Library_Error_Mask_Guard (void) + : category_ (ACE_Log_Category::ace_lib ()) + , thread_category_ (category_.per_thr_obj ()) + , category_mask_ (category_.priority_mask ()) + , thread_mask_ (thread_category_ != 0 + ? 
thread_category_->priority_mask () + : 0) + { + category_.priority_mask (category_mask_ & ~LM_ERROR); + if (thread_category_ != 0) + thread_category_->priority_mask (thread_mask_ & ~LM_ERROR); + } + + ~Library_Error_Mask_Guard (void) + { + category_.priority_mask (category_mask_); + if (thread_category_ != 0) + thread_category_->priority_mask (thread_mask_); + } + + private: + Library_Error_Mask_Guard (const Library_Error_Mask_Guard &); + Library_Error_Mask_Guard &operator= (const Library_Error_Mask_Guard &); + + ACE_Log_Category &category_; + ACE_Log_Category_TSS *thread_category_; + u_long const category_mask_; + u_long const thread_mask_; + }; + + class Accept_Handler : public ACE_Handler + { + public: + Accept_Handler (void) + : done_ (false) + , success_ (false) + , bytes_transferred_ (static_cast (-1)) + , accept_handle_ (ACE_INVALID_HANDLE) + , error_ (0) + { + } + + virtual void handle_accept (const ACE_Asynch_Accept::Result &result) + { + this->done_ = true; + this->success_ = result.success () != 0; + this->bytes_transferred_ = result.bytes_transferred (); + this->accept_handle_ = result.accept_handle (); + this->error_ = result.error (); + } + + bool done_; + bool success_; + size_t bytes_transferred_; + ACE_HANDLE accept_handle_; + u_long error_; + }; + + class Dummy_Handler : public ACE_Handler + { + }; + + class Connect_Handler : public ACE_Handler + { + public: + Connect_Handler (void) + : done_ (false) + , success_ (false) + , connect_handle_ (ACE_INVALID_HANDLE) + , error_ (0) + { + } + + virtual void handle_connect (const ACE_Asynch_Connect::Result &result) + { + this->done_ = true; + this->success_ = result.success () != 0; + this->connect_handle_ = result.connect_handle (); + this->error_ = result.error (); + } + + bool done_; + bool success_; + ACE_HANDLE connect_handle_; + u_long error_; + }; + + class Transmit_Handler : public ACE_Handler + { + public: + Transmit_Handler (void) + : done_ (false) + , success_ (false) + , bytes_transferred_ (0) 
+ , error_ (0) + { + } + + virtual void handle_transmit_file (const ACE_Asynch_Transmit_File::Result &result) + { + this->done_ = true; + this->success_ = result.success () != 0; + this->bytes_transferred_ = result.bytes_transferred (); + this->error_ = result.error (); + } + + bool done_; + bool success_; + size_t bytes_transferred_; + u_long error_; + }; + + class Timer_Handler : public ACE_Handler + { + public: + Timer_Handler (void) + : done_ (false) + { + } + + virtual void handle_time_out (const ACE_Time_Value &, const void *) + { + this->done_ = true; + } + + bool done_; + }; + + size_t + count_open_fds (void) + { +#if defined (ACE_WIN32) + return 0; +#else + size_t count = 0; + ACE_DIR *dir = ACE_OS::opendir (ACE_TEXT ("/proc/self/fd")); + if (dir == 0) + return 0; + + for (ACE_DIRENT *entry = ACE_OS::readdir (dir); + entry != 0; + entry = ACE_OS::readdir (dir)) + { + if (ACE_OS::strcmp (entry->d_name, ".") != 0 + && ACE_OS::strcmp (entry->d_name, "..") != 0) + ++count; + } + + ACE_OS::closedir (dir); + return count; +#endif /* ACE_WIN32 */ + } + + int + parse_args (int argc, ACE_TCHAR *argv[]) + { + ACE_Get_Opt get_opt (argc, argv, ACE_TEXT ("t:")); + int c = 0; + + while ((c = get_opt ()) != EOF) + { + switch (c) + { + case 't': + if (Proactor_Test_Backend::parse_type (get_opt.opt_arg (), proactor_type) == 0) + break; + return -1; + default: + return -1; + } + } + + return 0; + } + + int + run_accept_contract_test (ACE_Proactor &proactor) + { + ACE_SOCK_Acceptor listen_socket; + ACE_INET_Addr listen_addr ((u_short) 0, ACE_LOCALHOST); + + if (listen_socket.open (listen_addr, 1) != 0) + { + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("%p\n"), + ACE_TEXT ("listen_socket.open")), + -1); + } + + if (listen_socket.get_local_addr (listen_addr) != 0) + { + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("%p\n"), + ACE_TEXT ("listen_socket.get_local_addr")), + -1); + } + + const int addr_family = listen_addr.get_type (); + + Accept_Handler handler; + ACE_Asynch_Accept 
acceptor; + if (acceptor.open (handler, listen_socket.get_handle (), 0, &proactor) != 0) + { + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("%p\n"), + ACE_TEXT ("ACE_Asynch_Accept::open")), + -1); + } + + ACE_Message_Block mb (1024); + if (acceptor.accept (mb, 0, ACE_INVALID_HANDLE, 0, 0, ACE_SIGRTMIN, addr_family) != 0) + { + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("%p\n"), + ACE_TEXT ("ACE_Asynch_Accept::accept")), + -1); + } + + ACE_Time_Value zero = ACE_Time_Value::zero; + const int empty_poll = proactor.handle_events (zero); + if (empty_poll != 0) + { + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("Expected empty zero-timeout poll to return 0; got %d\n"), + empty_poll)); + return -1; + } + + ACE_SOCK_Connector sock_connector; + ACE_SOCK_Stream client_stream; + if (sock_connector.connect (client_stream, listen_addr) != 0) + { + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("%p\n"), + ACE_TEXT ("connector.connect")), + -1); + } + + const ACE_Time_Value deadline = ACE_OS::gettimeofday () + ACE_Time_Value (5); + while (!handler.done_ && ACE_OS::gettimeofday () < deadline) + { + ACE_Time_Value poll_timeout = ACE_Time_Value::zero; + const int result = proactor.handle_events (poll_timeout); + if (result == -1) + { + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("%p\n"), + ACE_TEXT ("ACE_Proactor::handle_events")), + -1); + } + + if (!handler.done_) + ACE_OS::sleep (ACE_Time_Value (0, 1000)); + } + + if (!handler.done_) + { + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("Timed out waiting for accept completion\n"))); + return -1; + } + + if (!handler.success_) + { + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("Accept completion failed with error %u\n"), + handler.error_)); + return -1; + } + + if (handler.accept_handle_ == ACE_INVALID_HANDLE) + { + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("Accept completion returned invalid accept handle\n"))); + return -1; + } + + if (handler.bytes_transferred_ != 0) + { + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("Accept completion bytes_transferred=%B; expected 0\n"), + 
handler.bytes_transferred_)); + return -1; + } + + ACE_OS::closesocket (handler.accept_handle_); + client_stream.close (); + listen_socket.close (); + return 0; + } + + int + run_accept_cancel_test (ACE_Proactor &proactor) + { + ACE_SOCK_Acceptor listen_socket; + ACE_INET_Addr listen_addr ((u_short) 0, ACE_LOCALHOST); + + if (listen_socket.open (listen_addr, 1) != 0) + { + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("%p\n"), + ACE_TEXT ("listen_socket.open")), + -1); + } + + if (listen_socket.get_local_addr (listen_addr) != 0) + { + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("%p\n"), + ACE_TEXT ("listen_socket.get_local_addr")), + -1); + } + + const int addr_family = listen_addr.get_type (); + + Accept_Handler handler; + ACE_Asynch_Accept acceptor; + if (acceptor.open (handler, listen_socket.get_handle (), 0, &proactor) != 0) + { + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("%p\n"), + ACE_TEXT ("ACE_Asynch_Accept::open")), + -1); + } + + ACE_Message_Block mb (1024); + if (acceptor.accept (mb, 0, ACE_INVALID_HANDLE, 0, 0, ACE_SIGRTMIN, addr_family) != 0) + { + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("%p\n"), + ACE_TEXT ("ACE_Asynch_Accept::accept")), + -1); + } + + const int cancel_result = acceptor.cancel (); + if (cancel_result != 0) + { + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("accept cancel returned %d; expected 0\n"), + cancel_result)); + return -1; + } + + const ACE_Time_Value deadline = ACE_OS::gettimeofday () + ACE_Time_Value (5); + while (!handler.done_ && ACE_OS::gettimeofday () < deadline) + { + ACE_Time_Value wait_time (0, 10000); + const int result = proactor.handle_events (wait_time); + if (result == -1) + { + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("%p\n"), + ACE_TEXT ("ACE_Proactor::handle_events")), + -1); + } + } + + if (!handler.done_) + { + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("Timed out waiting for canceled accept completion\n"))); + return -1; + } + + if (handler.success_) + { + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("Canceled accept unexpectedly 
succeeded\n"))); + return -1; + } + + const u_long expected_cancel_error = +#if defined (ACE_WIN32) + ERROR_OPERATION_ABORTED +#else + ECANCELED +#endif /* ACE_WIN32 */ + ; + + if (handler.error_ != expected_cancel_error) + { + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("Canceled accept error=%u; expected %u\n"), + handler.error_, + expected_cancel_error)); + return -1; + } + + if (handler.bytes_transferred_ != 0) + { + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("Canceled accept bytes_transferred=%B; expected 0\n"), + handler.bytes_transferred_)); + return -1; + } + + if (handler.accept_handle_ != ACE_INVALID_HANDLE) + { + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("Canceled accept returned unexpected handle %d\n"), + handler.accept_handle_)); + ACE_OS::closesocket (handler.accept_handle_); + return -1; + } + + listen_socket.close (); + return 0; + } + + int + run_connect_failure_cleanup_test (ACE_Proactor &proactor) + { +#if defined (ACE_WIN32) + ACE_SOCK_Acceptor remote_socket; + ACE_INET_Addr remote_addr ((u_short) 0, ACE_LOCALHOST); + if (remote_socket.open (remote_addr, 1) != 0) + { + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("%p\n"), + ACE_TEXT ("remote_socket.open")), + -1); + } + + if (remote_socket.get_local_addr (remote_addr) != 0) + { + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("%p\n"), + ACE_TEXT ("remote_socket.get_local_addr")), + -1); + } + + // Exercise Windows connect-failure cleanup via an unused destination + // port. Forcing a local bind collision drives the WIN32 connector down + // an immediate-error path that corrupts teardown state. 
+ remote_socket.close (); +#else + ACE_SOCK_Acceptor busy_local_socket; + ACE_INET_Addr busy_local_addr ((u_short) 0, ACE_LOCALHOST); + if (busy_local_socket.open (busy_local_addr, 1) != 0) + { + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("%p\n"), + ACE_TEXT ("busy_local_socket.open")), + -1); + } + + if (busy_local_socket.get_local_addr (busy_local_addr) != 0) + { + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("%p\n"), + ACE_TEXT ("busy_local_socket.get_local_addr")), + -1); + } + + ACE_SOCK_Acceptor remote_socket; + ACE_INET_Addr remote_addr ((u_short) 0, ACE_LOCALHOST); + if (remote_socket.open (remote_addr, 1) != 0) + { + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("%p\n"), + ACE_TEXT ("remote_socket.open")), + -1); + } + + if (remote_socket.get_local_addr (remote_addr) != 0) + { + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("%p\n"), + ACE_TEXT ("remote_socket.get_local_addr")), + -1); + } +#endif /* ACE_WIN32 */ + + Connect_Handler handler; + ACE_Asynch_Connect connector; + if (connector.open (handler, ACE_INVALID_HANDLE, 0, &proactor) != 0) + { + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("%p\n"), + ACE_TEXT ("ACE_Asynch_Connect::open")), + -1); + } + + const size_t fd_count_before = count_open_fds (); + const int attempts = +#if defined (ACE_WIN32) + 4 +#else + 32 +#endif /* ACE_WIN32 */ + ; + + for (int i = 0; i < attempts; ++i) + { + handler.done_ = false; + handler.success_ = false; + handler.connect_handle_ = ACE_INVALID_HANDLE; + handler.error_ = 0; + errno = 0; + int result = 0; + { + Library_Error_Mask_Guard error_mask_guard; +#if defined (ACE_WIN32) + result = connector.connect (ACE_INVALID_HANDLE, + remote_addr, + ACE_Addr::sap_any, + 0, + 0); +#else + result = connector.connect (ACE_INVALID_HANDLE, + remote_addr, + busy_local_addr, + 0, + 0); +#endif /* ACE_WIN32 */ + } + + if (result == 0) + { + const ACE_Time_Value deadline = ACE_OS::gettimeofday () + ACE_Time_Value (5); + while (!handler.done_ && ACE_OS::gettimeofday () < deadline) + { + ACE_Time_Value 
wait_time (0, 10000); + const int handle_result = proactor.handle_events (wait_time); + if (handle_result == -1) + { + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("%p\n"), + ACE_TEXT ("ACE_Proactor::handle_events")), + -1); + } + } + + if (!handler.done_) + { + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("connect failure did not complete on iteration %d\n"), + i)); + return -1; + } + + if (handler.success_) + { + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("connect failure unexpectedly succeeded on iteration %d\n"), + i)); + return -1; + } + } + else if (result != -1) + { + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("connect failure returned unexpected rc=%d on iteration %d\n"), + result, + i)); + return -1; + } + else if (errno == 0) + { + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("connect failure cleanup test did not set errno on iteration %d\n"), + i)); + return -1; + } + } + + const size_t fd_count_after = count_open_fds (); + if (fd_count_before != fd_count_after) + { + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("connect failure leaked fds: before=%B after=%B\n"), + fd_count_before, + fd_count_after)); + return -1; + } + + remote_socket.close (); +#if !defined (ACE_WIN32) + busy_local_socket.close (); +#endif /* !ACE_WIN32 */ + return 0; + } + + int + run_transmit_file_contract_test (ACE_Proactor &proactor) + { +#if defined (ACE_WIN32) + ACE_UNUSED_ARG (proactor); + ACE_DEBUG ((LM_INFO, + ACE_TEXT ("Skipping transmit_file contract on Windows; ") + ACE_TEXT ("this test harness uses non-overlapped stream handles.\n"))); + return 0; +#endif /* ACE_WIN32 */ + + ACE_SOCK_Acceptor listen_socket; + ACE_INET_Addr listen_addr ((u_short) 0, ACE_LOCALHOST); + if (listen_socket.open (listen_addr, 1) != 0) + { + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("%p\n"), + ACE_TEXT ("listen_socket.open")), + -1); + } + + if (listen_socket.get_local_addr (listen_addr) != 0) + { + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("%p\n"), + ACE_TEXT ("listen_socket.get_local_addr")), + -1); + } + + ACE_SOCK_Connector connector; + 
ACE_SOCK_Stream client_stream; + if (connector.connect (client_stream, listen_addr) != 0) + { + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("%p\n"), + ACE_TEXT ("connector.connect")), + -1); + } + + ACE_SOCK_Stream server_stream; + if (listen_socket.accept (server_stream) != 0) + { + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("%p\n"), + ACE_TEXT ("listen_socket.accept")), + -1); + } + + ACE_FILE_Connector file_connector; + ACE_FILE_IO file; + if (file_connector.connect (file, ACE_sap_any_cast (ACE_FILE_Addr &)) != 0) + { + server_stream.close (); + client_stream.close (); + listen_socket.close (); + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("%p\n"), + ACE_TEXT ("ACE_FILE_Connector::connect")), + -1); + } + + ACE_FILE_Addr tmp_addr; + file.get_local_addr (tmp_addr); + if (ACE_OS::write (file.get_handle (), "u", 1) != 1) + { + file.remove (); + server_stream.close (); + client_stream.close (); + listen_socket.close (); + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("%p\n"), + ACE_TEXT ("write")), + -1); + } + + Transmit_Handler handler; + ACE_Asynch_Transmit_File transmit_file; + if (transmit_file.open (handler, server_stream.get_handle (), 0, &proactor) != 0) + { + file.remove (); + server_stream.close (); + client_stream.close (); + listen_socket.close (); + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("%p\n"), + ACE_TEXT ("ACE_Asynch_Transmit_File::open")), + -1); + } + + errno = 0; + const int result = transmit_file.transmit_file (file.get_handle (), + 0, + 1, + 0, + 0, + 0, + 0, + 0); + if (proactor_type == Proactor_Test_Backend::BACKEND_URING) + { + if (result != -1 || errno != ENOTSUP) + { + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("transmit_file result=%d errno=%d; expected -1/ENOTSUP\n"), + result, + errno)); + file.remove (); + server_stream.close (); + client_stream.close (); + listen_socket.close (); + return -1; + } + } + else + { + if (result != 0) + { + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("POSIX transmit_file returned %d errno=%d; expected 0\n"), + result, + errno)); + 
file.remove (); + server_stream.close (); + client_stream.close (); + listen_socket.close (); + return -1; + } + + const ACE_Time_Value deadline = ACE_OS::gettimeofday () + ACE_Time_Value (5); + while (!handler.done_ && ACE_OS::gettimeofday () < deadline) + { + ACE_Time_Value wait_time (0, 10000); + const int handle_result = proactor.handle_events (wait_time); + if (handle_result == -1) + { + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("%p\n"), + ACE_TEXT ("ACE_Proactor::handle_events")), + -1); + } + } + + if (!handler.done_ || !handler.success_ || handler.bytes_transferred_ == 0) + { + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("POSIX transmit_file completion invalid: done=%d success=%d bytes=%B error=%u\n"), + handler.done_, + handler.success_, + handler.bytes_transferred_, + handler.error_)); + file.remove (); + server_stream.close (); + client_stream.close (); + listen_socket.close (); + return -1; + } + + char received = '\0'; + const ssize_t recv_result = + ACE_OS::recv (client_stream.get_handle (), &received, 1, 0); + if (recv_result != 1 || received != 'u') + { + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("POSIX transmit_file recv_result=%d byte=%c; expected 1/'u'\n"), + static_cast (recv_result), + received)); + file.remove (); + server_stream.close (); + client_stream.close (); + listen_socket.close (); + return -1; + } + } + + file.remove (); + server_stream.close (); + client_stream.close (); + listen_socket.close (); + return 0; + } + + int + run_close_singleton_quiesced_test (void) + { + ACE_Proactor *proactor_ptr = 0; + if (Proactor_Test_Backend::create_proactor (proactor_type, + 128, + proactor_ptr, + true) != 0) + { + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("%p\n"), + ACE_TEXT ("Proactor_Test_Backend::create_proactor")), + -1); + } + + int status = 0; + Timer_Handler handler; + // Drive one real async completion before tearing the singleton down. 
+ if (proactor_ptr->schedule_timer (handler, 0, ACE_Time_Value (0, 1000)) == -1) + { + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("%p\n"), + ACE_TEXT ("ACE_Proactor::schedule_timer"))); + status = -1; + } + else + { + const ACE_Time_Value deadline = ACE_OS::gettimeofday () + ACE_Time_Value (5); + while (!handler.done_ && ACE_OS::gettimeofday () < deadline) + { + ACE_Time_Value wait_time (0, 10000); + const int result = proactor_ptr->handle_events (wait_time); + if (result == -1) + { + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("%p\n"), + ACE_TEXT ("ACE_Proactor::handle_events"))); + status = -1; + break; + } + } + + if (status == 0 && !handler.done_) + { + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("Timed out waiting for singleton timer completion\n"))); + status = -1; + } + } + + if (!handler.done_) + (void) proactor_ptr->cancel_timer (handler); + + ACE_Proactor::close_singleton (); + return status; + } +} + +int +run_main (int argc, ACE_TCHAR *argv[]) +{ + ACE_START_TEST (ACE_TEXT ("Proactor_Contract_Test")); + + if (parse_args (argc, argv) != 0) + { + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("Usage: %s [-t ]\n"), + argv[0])); + ACE_END_TEST; + return -1; + } + + ACE_Proactor *proactor_ptr = 0; + if (Proactor_Test_Backend::create_proactor (proactor_type, + 128, + proactor_ptr, + false) != 0) + { + ACE_END_TEST; + return -1; + } + ACE_Proactor &proactor = *proactor_ptr; + + int status = 0; + if (run_accept_contract_test (proactor) != 0) + status = -1; + else if (run_accept_cancel_test (proactor) != 0) + status = -1; + else if (run_connect_failure_cleanup_test (proactor) != 0) + status = -1; + else if (run_transmit_file_contract_test (proactor) != 0) + status = -1; + + delete proactor_ptr; + if (status == 0 && run_close_singleton_quiesced_test () != 0) + status = -1; + ACE_END_TEST; + return status; +} + +#else + +int +run_main (int, ACE_TCHAR *[]) +{ + ACE_START_TEST (ACE_TEXT ("Proactor_Contract_Test")); + ACE_DEBUG ((LM_INFO, + ACE_TEXT ("Asynchronous IO is unsupported.\n") + ACE_TEXT 
("Proactor_Contract_Test will not be run.\n"))); + ACE_END_TEST; + return 0; +} + +#endif /* ACE_HAS_WIN32_OVERLAPPED_IO || ACE_HAS_AIO_CALLS */ diff --git a/ACE/tests/Proactor_File_Test.cpp b/ACE/tests/Proactor_File_Test.cpp index 0181d114995d1..cac511997a548 100644 --- a/ACE/tests/Proactor_File_Test.cpp +++ b/ACE/tests/Proactor_File_Test.cpp @@ -14,7 +14,7 @@ #if defined (ACE_HAS_WIN32_OVERLAPPED_IO) || defined (ACE_HAS_AIO_CALLS) // This only works on Win32 platforms and on Unix platforms - // supporting POSIX aio calls. + // supporting POSIX aio calls or io_uring. ////////////////////////////////////////////////////////////////// @@ -37,6 +37,8 @@ #include "ace/Proactor.h" #include "ace/Asynch_Connector.h" #include "ace/Time_Value.h" +#include "ace/Get_Opt.h" +#include "Proactor_Test_Backend.h" // How long are our fake serial I/O frames? @@ -51,6 +53,12 @@ class FileIOHandler : public ACE_Handler int Connect(); + int + start_timer (const ACE_Time_Value &interval); + + void + shutdown (void); + // This method will be called when an asynchronous read // completes on a file. virtual void @@ -69,7 +77,12 @@ class FileIOHandler : public ACE_Handler ACE_Asynch_Read_File reader_; ACE_Asynch_Write_File writer_; private: + void + cancel_timer (void); + int block_count_; + long timer_id_; + bool shutting_down_; #if defined (ACE_WIN32) bool read_pending_; #endif @@ -80,6 +93,8 @@ class FileIOHandler : public ACE_Handler FileIOHandler::FileIOHandler () : ACE_Handler () , block_count_ (0) + , timer_id_ (-1) + , shutting_down_ (false) #if defined (ACE_WIN32) , read_pending_ (false) #endif @@ -198,6 +213,31 @@ int FileIOHandler::Connect() return result; } +int +FileIOHandler::start_timer (const ACE_Time_Value &interval) +{ + this->timer_id_ = + ACE_Proactor::instance ()->schedule_repeating_timer (*this, 0, interval); + return this->timer_id_ == -1 ? 
-1 : 0; +} + +void +FileIOHandler::cancel_timer (void) +{ + if (this->timer_id_ != -1) + { + ACE_Proactor::instance ()->cancel_timer (this->timer_id_); + this->timer_id_ = -1; + } +} + +void +FileIOHandler::shutdown (void) +{ + this->shutting_down_ = true; + this->cancel_timer (); +} + //*************************************************************************** // // Method: handle_read_file @@ -213,13 +253,25 @@ void FileIOHandler::handle_read_file(const ACE_Asynch_Read_File::Result &result) { ACE_Message_Block &mb = result.message_block(); + if (this->shutting_down_) + { + mb.release(); +#if defined (ACE_WIN32) + this->read_pending_ = false; +#endif + return; + } + + unsigned long const next_offset = + result.offset () + static_cast (result.bytes_transferred ()); + // If the read failed, queue up another one using the same message block if (!result.success() || result.bytes_transferred() == 0) { //ACE_DEBUG((LM_INFO, ACE_TEXT("FileIOHandler receive timeout.\n"))); reader_.read(mb, mb.space(), - result.offset () + result.bytes_transferred ()); + next_offset); } else { @@ -237,13 +289,13 @@ FileIOHandler::handle_read_file(const ACE_Asynch_Read_File::Result &result) // Release the message block when we're done with it mb.release(); - if ((result.offset () + result.bytes_transferred ()) < 256) + if (next_offset < 256) { // Our processing is done; prime the read process again ACE_Message_Block *new_mb; ACE_NEW_NORETURN(new_mb, ACE_Message_Block(FILE_FRAME_SIZE)); if (reader_.read(*new_mb, new_mb->space(), - result.offset () + result.bytes_transferred ()) != 0) + next_offset) != 0) { int errnr = ACE_OS::last_error (); ACE_DEBUG( @@ -261,6 +313,7 @@ FileIOHandler::handle_read_file(const ACE_Asynch_Read_File::Result &result) else { // we have it all; stop the proactor + this->shutdown (); ACE_Proactor::instance ()->proactor_end_event_loop (); } } @@ -284,6 +337,10 @@ FileIOHandler::handle_write_file(const ACE_Asynch_Write_File::Result &result) // When the write 
completes, we get the message block. It's been sent, // so we just deallocate it. result.message_block().release(); + if (this->shutting_down_) + { + return; + } #if defined (ACE_WIN32) // to circumvent problems on older Win32 (see above) we schedule a read here if none // is pending yet. @@ -322,6 +379,11 @@ FileIOHandler::handle_write_file(const ACE_Asynch_Write_File::Result &result) void FileIOHandler::handle_time_out(const ACE_Time_Value & /*tv*/, const void * /*act*/) { + if (this->shutting_down_ || this->block_count_ >= 16) + { + return; + } + // do not schedule more than 16 writes if (this->block_count_ < 16) { @@ -343,6 +405,10 @@ FileIOHandler::handle_time_out(const ACE_Time_Value & /*tv*/, const void * /*act { ACE_DEBUG((LM_INFO, ACE_TEXT("Successfully queued write of %d bytes\n"), new_mb->length ())); // success this->block_count_ ++; // next block + if (this->block_count_ >= 16) + { + this->cancel_timer (); + } } else { @@ -353,33 +419,68 @@ FileIOHandler::handle_time_out(const ACE_Time_Value & /*tv*/, const void * /*act int -run_main(int /*argc*/, ACE_TCHAR * /*argv*/[]) +run_main(int argc, ACE_TCHAR *argv[]) { + ACE_Proactor *proactor = 0; + Proactor_Test_Backend::Type backend = Proactor_Test_Backend::BACKEND_DEFAULT; + ACE_Get_Opt get_opt (argc, argv, ACE_TEXT ("t:")); + int c = 0; + while ((c = get_opt ()) != EOF) + { + switch (c) + { + case 't': + if (Proactor_Test_Backend::parse_type (get_opt.opt_arg (), backend) == 0) + break; + Proactor_Test_Backend::print_type_usage (argv[0]); + return -1; + default: + Proactor_Test_Backend::print_type_usage (argv[0]); + return -1; + } + } + ACE_START_TEST (ACE_TEXT ("Proactor_File_Test")); - int rc = 0; - FileIOHandler fileIOHandler; + if (Proactor_Test_Backend::create_proactor (backend, 128, proactor, true) != 0) + { + ACE_END_TEST; + return -1; + } - // Initialize the serial port handler - if (0 != fileIOHandler.Connect()) - { - rc = 1; - } - else + int rc = 0; { - ACE_DEBUG((LM_INFO, ACE_TEXT(" File I/O 
Handler connected.\n"))); + FileIOHandler fileIOHandler; - // start the repeating timer for data transmission + // Initialize the serial port handler + if (0 != fileIOHandler.Connect()) + { + rc = 1; + } + else + { + ACE_DEBUG((LM_INFO, ACE_TEXT(" File I/O Handler connected.\n"))); - ACE_Time_Value repeatTime(0, 50000); // 0.05 second time interval - ACE_Proactor::instance()->schedule_repeating_timer(fileIOHandler, - (void *) (100), - repeatTime); + // start the repeating timer for data transmission - // Run the Proactor - ACE_Proactor::instance()->proactor_run_event_loop(); + ACE_Time_Value repeatTime(0, 50000); // 0.05 second time interval + if (fileIOHandler.start_timer (repeatTime) != 0) + rc = 1; + + if (rc == 0) + // Run the Proactor + ACE_Proactor::instance()->proactor_run_event_loop(); + } + + fileIOHandler.shutdown (); } +#if defined (ACE_WIN32) + ACE_DEBUG ((LM_DEBUG, + ACE_TEXT ("(%t) Skipping ACE_Proactor::close_singleton() on Windows test shutdown\n"))); +#else + ACE_Proactor::close_singleton (); +#endif ACE_END_TEST; return rc; diff --git a/ACE/tests/Proactor_Network_Performance_Test.cpp b/ACE/tests/Proactor_Network_Performance_Test.cpp new file mode 100644 index 0000000000000..68bbe44e83481 --- /dev/null +++ b/ACE/tests/Proactor_Network_Performance_Test.cpp @@ -0,0 +1,1816 @@ +// ============================================================================ +/** + * @file Proactor_Network_Performance_Test.cpp + * + * Benchmark-oriented network coverage for selectable ACE Proactor backends. + * The control plane uses synchronous socket setup while the data plane uses + * asynchronous I/O so throughput and progress behavior can be compared across + * backends and platforms. 
+ */ +// ============================================================================ + +#include "test_config.h" + +#if defined (ACE_HAS_THREADS) && (defined (ACE_HAS_WIN32_OVERLAPPED_IO) || defined (ACE_HAS_AIO_CALLS)) + +#include "ace/ACE.h" +#include "ace/Asynch_IO.h" +#include "ace/Condition_Thread_Mutex.h" +#include "ace/Get_Opt.h" +#include "ace/INET_Addr.h" +#include "ace/Message_Block.h" +#include "ace/OS_NS_errno.h" +#include "ace/OS_NS_stdlib.h" +#include "ace/OS_NS_string.h" +#include "ace/OS_NS_sys_socket.h" +#include "ace/OS_NS_sys_time.h" +#include "ace/OS_NS_unistd.h" +#include "ace/Proactor.h" +#include "ace/Recursive_Thread_Mutex.h" +#include "ace/SOCK_Acceptor.h" +#include "ace/SOCK_CODgram.h" +#include "ace/SOCK_Connector.h" +#include "ace/SOCK_Dgram.h" +#include "ace/SOCK_Stream.h" +#include "ace/Sock_Connect.h" +#include "ace/Task.h" +#include "ace/Thread_Mutex.h" +#include "ace/Time_Value.h" +#include "ace/os_include/os_limits.h" +#include "ace/os_include/netinet/os_tcp.h" + +#include "Proactor_Test_Backend.h" + +namespace +{ + enum Transport + { + TRANSPORT_TCP, + TRANSPORT_UDP + }; + + struct Config + { + Config () + : backend (Proactor_Test_Backend::BACKEND_DEFAULT) + , transport (TRANSPORT_TCP) + , family (AF_INET) + , sessions (64) + , messages_per_endpoint (1024) + , payload_size (1024) + , write_depth (8) + , thread_count (4) + , max_aio_operations (0) + , listen_port (0) + , progress_timeout (10) + , overall_timeout (120) + , udp_end_markers (16) + , udp_receive_buffer (0) + , udp_send_buffer (0) + { + } + + Proactor_Test_Backend::Type backend; + Transport transport; + int family; + size_t sessions; + size_t messages_per_endpoint; + size_t payload_size; + size_t write_depth; + int thread_count; + size_t max_aio_operations; + u_short listen_port; + long progress_timeout; + long overall_timeout; + size_t udp_end_markers; + size_t udp_receive_buffer; + size_t udp_send_buffer; + }; + + const ACE_UINT32 UDP_MAGIC = 0x504E5054U; // PNPT + 
const ACE_UINT32 UDP_DATA = 1U; + const ACE_UINT32 UDP_END = 2U; + + struct Udp_Header + { + ACE_UINT32 magic_; + ACE_UINT32 type_; + ACE_UINT32 session_; + ACE_UINT32 reserved_; + }; + + bool + is_cancel_error (u_long error) + { +#if defined (ACE_WIN32) + return error == ERROR_OPERATION_ABORTED; +#else + return error == ECANCELED; +#endif /* ACE_WIN32 */ + } + + bool + is_retryable_socket_error (u_long error) + { +#if defined (ACE_WIN32) + return error == WSAEWOULDBLOCK || error == ERROR_IO_PENDING; +#else + return error == EAGAIN || error == EWOULDBLOCK; +#endif /* ACE_WIN32 */ + } + + bool + is_udp_peer_closed_error (u_long error) + { +#if defined (ACE_WIN32) + return error == WSAECONNRESET; +#else + return error == ECONNREFUSED; +#endif /* ACE_WIN32 */ + } + + const ACE_TCHAR * + transport_name (Transport transport) + { + return transport == TRANSPORT_TCP ? ACE_TEXT ("tcp") : ACE_TEXT ("udp"); + } + + const ACE_TCHAR * + family_name (int family) + { + return family == AF_INET6 ? ACE_TEXT ("ipv6") : ACE_TEXT ("ipv4"); + } + + ACE_Time_Value + time_now () + { + return ACE_OS::gettimeofday (); + } + + double + seconds_between (const ACE_Time_Value &start, + const ACE_Time_Value &end) + { + const ACE_Time_Value delta = end - start; + return static_cast (delta.sec ()) + + (static_cast (delta.usec ()) / 1000000.0); + } + + int parse_unsigned_long_arg (const ACE_TCHAR *text, + unsigned long &value); + int parse_size_arg (const ACE_TCHAR *text, + size_t &value); + int parse_u_short_arg (const ACE_TCHAR *text, + u_short &value); + int parse_int_arg (const ACE_TCHAR *text, + int &value); + int prepare_socket_handle (ACE_HANDLE handle); + int configure_udp_socket_buffer (ACE_HANDLE handle, + int option_name, + size_t requested_size, + size_t &actual_size); + int configure_udp_socket_buffers (ACE_HANDLE handle, + const Config &config, + size_t &actual_receive_size, + size_t &actual_send_size); + + class Benchmark_State + { + public: + Benchmark_State (const Config &config, 
+ size_t expected_endpoints) + : config_ (config) + , lock_ () + , done_ (lock_) + , expected_endpoints_ (expected_endpoints) + , completed_endpoints_ (0) + , failed_ (false) + , stalled_ (false) + , timed_out_ (false) + , first_error_code_ (0) + , total_write_ops_ (0) + , total_read_ops_ (0) + , total_sent_bytes_ (0) + , total_received_bytes_ (0) + , udp_data_sent_messages_ (0) + , udp_data_received_messages_ (0) + , udp_socket_buffers_recorded_ (false) + , udp_receive_buffer_actual_ (0) + , udp_send_buffer_actual_ (0) + { + this->first_error_[0] = 0; + this->start_time_ = time_now (); + this->last_progress_time_ = this->start_time_; + this->end_time_ = this->start_time_; + } + + void note_write (size_t bytes, + bool count_as_data_message) + { + ACE_GUARD (ACE_Thread_Mutex, guard, this->lock_); + ++this->total_write_ops_; + this->total_sent_bytes_ += bytes; + if (count_as_data_message) + ++this->udp_data_sent_messages_; + this->last_progress_time_ = time_now (); + this->done_.broadcast (); + } + + void note_read (size_t bytes, + bool count_as_data_message) + { + ACE_GUARD (ACE_Thread_Mutex, guard, this->lock_); + ++this->total_read_ops_; + this->total_received_bytes_ += bytes; + if (count_as_data_message) + ++this->udp_data_received_messages_; + this->last_progress_time_ = time_now (); + this->done_.broadcast (); + } + + void note_error (const ACE_TCHAR *where, + size_t session_id, + const ACE_TCHAR *role, + u_long error = 0) + { + ACE_GUARD (ACE_Thread_Mutex, guard, this->lock_); + if (!this->failed_) + { + this->failed_ = true; + this->first_error_code_ = error; + ACE_OS::snprintf (this->first_error_, + sizeof (this->first_error_) / sizeof (this->first_error_[0]), + ACE_TEXT ("%s session=%lu side=%s"), + where, + static_cast (session_id), + role); + } + this->end_time_ = time_now (); + this->done_.broadcast (); + } + + void endpoint_done () + { + ACE_GUARD (ACE_Thread_Mutex, guard, this->lock_); + ++this->completed_endpoints_; + this->last_progress_time_ = 
time_now (); + if (this->completed_endpoints_ >= this->expected_endpoints_) + this->end_time_ = this->last_progress_time_; + this->done_.broadcast (); + } + + void note_udp_socket_buffers (size_t actual_receive_size, + size_t actual_send_size) + { + ACE_GUARD (ACE_Thread_Mutex, guard, this->lock_); + if (this->udp_socket_buffers_recorded_) + return; + this->udp_socket_buffers_recorded_ = true; + this->udp_receive_buffer_actual_ = actual_receive_size; + this->udp_send_buffer_actual_ = actual_send_size; + } + + bool wait_for_completion (void) + { + const ACE_Time_Value absolute_deadline = + time_now () + ACE_Time_Value (this->config_.overall_timeout); + + ACE_GUARD_RETURN (ACE_Thread_Mutex, guard, this->lock_, false); + + while (!this->failed_ && this->completed_endpoints_ < this->expected_endpoints_) + { + const ACE_Time_Value now = time_now (); + if (now >= absolute_deadline) + { + this->failed_ = true; + this->timed_out_ = true; + this->end_time_ = now; + break; + } + + const ACE_Time_Value progress_deadline = + this->last_progress_time_ + ACE_Time_Value (this->config_.progress_timeout); + if (now >= progress_deadline) + { + this->end_time_ = now; + this->failed_ = true; + this->stalled_ = true; + break; + } + + ACE_Time_Value wake = absolute_deadline < progress_deadline + ? 
absolute_deadline + : progress_deadline; + if (this->done_.wait (&wake) == -1 && errno != ETIME) + { + this->failed_ = true; + this->end_time_ = time_now (); + break; + } + } + + if (!this->failed_ && this->completed_endpoints_ >= this->expected_endpoints_) + this->end_time_ = time_now (); + + return !this->failed_; + } + + bool validate (void) const + { + ACE_GUARD_RETURN (ACE_Thread_Mutex, guard, const_cast (this->lock_), false); + return this->validate_i (); + } + + int report (void) const + { + ACE_GUARD_RETURN (ACE_Thread_Mutex, guard, const_cast (this->lock_), -1); + + const double elapsed = seconds_between (this->start_time_, this->end_time_); + const double safe_elapsed = elapsed > 0.0 ? elapsed : 0.000001; + const double send_mib = static_cast (this->total_sent_bytes_) / (1024.0 * 1024.0); + const double recv_mib = static_cast (this->total_received_bytes_) / (1024.0 * 1024.0); + const double send_mib_per_sec = send_mib / safe_elapsed; + const double recv_mib_per_sec = recv_mib / safe_elapsed; + const double completions_per_sec = + static_cast (this->total_write_ops_ + this->total_read_ops_) / safe_elapsed; + const double avg_write_us = + this->total_write_ops_ == 0 + ? 0.0 + : (safe_elapsed * 1000000.0) / static_cast (this->total_write_ops_); + const double avg_read_us = + this->total_read_ops_ == 0 + ? 0.0 + : (safe_elapsed * 1000000.0) / static_cast (this->total_read_ops_); + const double avg_completion_us = + (this->total_write_ops_ + this->total_read_ops_) == 0 + ? 
0.0 + : (safe_elapsed * 1000000.0) + / static_cast (this->total_write_ops_ + this->total_read_ops_); + size_t lost_messages = 0; + double loss_pct = 0.0; + + ACE_DEBUG ((LM_INFO, + ACE_TEXT ("Benchmark config: backend=%s transport=%s family=%s sessions=%B ") + ACE_TEXT ("messages=%B payload=%B depth=%B threads=%d elapsed=%.3f sec\n"), + Proactor_Test_Backend::name (this->config_.backend), + transport_name (this->config_.transport), + family_name (this->config_.family), + this->config_.sessions, + this->config_.messages_per_endpoint, + this->config_.payload_size, + this->config_.write_depth, + this->config_.thread_count, + elapsed)); + + ACE_DEBUG ((LM_INFO, + ACE_TEXT ("Totals: write_ops=%B read_ops=%B sent=%B bytes recv=%B bytes ") + ACE_TEXT ("send=%.2f MiB/s recv=%.2f MiB/s completions=%.2f/s\n"), + this->total_write_ops_, + this->total_read_ops_, + this->total_sent_bytes_, + this->total_received_bytes_, + send_mib_per_sec, + recv_mib_per_sec, + completions_per_sec)); + + if (this->config_.transport == TRANSPORT_UDP) + { + const size_t expected_data_messages = + this->config_.sessions * this->config_.messages_per_endpoint; + lost_messages = + expected_data_messages > this->udp_data_received_messages_ + ? expected_data_messages - this->udp_data_received_messages_ + : 0; + loss_pct = expected_data_messages == 0 + ? 
0.0 + : (100.0 * static_cast (lost_messages) + / static_cast (expected_data_messages)); + + ACE_DEBUG ((LM_INFO, + ACE_TEXT ("UDP delivery: data_sent=%B data_recv=%B lost=%B (%.2f%%)\n"), + this->udp_data_sent_messages_, + this->udp_data_received_messages_, + lost_messages, + loss_pct)); + ACE_DEBUG ((LM_INFO, + ACE_TEXT ("UDP socket buffers: req_rcv=%B req_snd=%B actual_rcv=%B actual_snd=%B\n"), + this->config_.udp_receive_buffer, + this->config_.udp_send_buffer, + this->udp_receive_buffer_actual_, + this->udp_send_buffer_actual_)); + } + + ACE_DEBUG ((LM_INFO, + ACE_TEXT ("PERF_RESULT benchmark=network backend=%s transport=%s family=%s ") + ACE_TEXT ("sessions=%B messages=%B payload=%B depth=%B threads=%d ") + ACE_TEXT ("elapsed_sec=%.6f send_mib_per_sec=%.6f recv_mib_per_sec=%.6f ") + ACE_TEXT ("completions_per_sec=%.6f avg_write_us=%.3f avg_read_us=%.3f ") + ACE_TEXT ("avg_completion_us=%.3f write_ops=%B read_ops=%B sent_bytes=%B ") + ACE_TEXT ("recv_bytes=%B udp_loss_pct=%.6f udp_lost=%B ") + ACE_TEXT ("udp_rcvbuf_req=%B udp_sndbuf_req=%B ") + ACE_TEXT ("udp_rcvbuf_actual=%B udp_sndbuf_actual=%B\n"), + Proactor_Test_Backend::name (this->config_.backend), + transport_name (this->config_.transport), + family_name (this->config_.family), + this->config_.sessions, + this->config_.messages_per_endpoint, + this->config_.payload_size, + this->config_.write_depth, + this->config_.thread_count, + elapsed, + send_mib_per_sec, + recv_mib_per_sec, + completions_per_sec, + avg_write_us, + avg_read_us, + avg_completion_us, + this->total_write_ops_, + this->total_read_ops_, + this->total_sent_bytes_, + this->total_received_bytes_, + loss_pct, + lost_messages, + this->config_.udp_receive_buffer, + this->config_.udp_send_buffer, + this->udp_receive_buffer_actual_, + this->udp_send_buffer_actual_)); + + if (this->failed_) + { + if (this->first_error_[0] != 0) + { + if (this->first_error_code_ != 0) + { + ACE_LOG_MSG->errnum (static_cast (this->first_error_code_)); + 
ACE_LOG_MSG->log (LM_ERROR, + ACE_TEXT ("Benchmark failed: %s: %p\n"), + this->first_error_, + ACE_TEXT ("error")); + } + else + { + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("Benchmark failed: %s\n"), + this->first_error_)); + } + } + else if (this->stalled_) + { + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("Benchmark failed: no progress for %d seconds\n"), + this->config_.progress_timeout)); + } + else if (this->timed_out_) + { + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("Benchmark failed: timed out after %d seconds\n"), + this->config_.overall_timeout)); + } + else + { + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("Benchmark failed\n"))); + } + return -1; + } + + if (!this->validate_i ()) + { + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("Benchmark completed but validation failed\n"))); + return -1; + } + + return 0; + } + + private: + bool validate_i (void) const + { + if (this->failed_) + return false; + + const size_t expected_data_messages = + this->config_.transport == TRANSPORT_UDP + ? this->config_.sessions * this->config_.messages_per_endpoint + : this->config_.sessions * this->config_.messages_per_endpoint * 2; + const size_t expected_data_bytes = expected_data_messages * this->config_.payload_size; + + if (this->config_.transport == TRANSPORT_TCP) + return this->total_sent_bytes_ == expected_data_bytes + && this->total_received_bytes_ == expected_data_bytes; + + return this->total_sent_bytes_ >= expected_data_bytes + && this->udp_data_sent_messages_ >= expected_data_messages + && this->udp_data_received_messages_ > 0 + && this->total_received_bytes_ > 0; + } + + Config config_; + mutable ACE_Thread_Mutex lock_; + ACE_Condition_Thread_Mutex done_; + ACE_Time_Value start_time_; + ACE_Time_Value last_progress_time_; + ACE_Time_Value end_time_; + size_t expected_endpoints_; + size_t completed_endpoints_; + bool failed_; + bool stalled_; + bool timed_out_; + u_long first_error_code_; + ACE_TCHAR first_error_[256]; + size_t total_write_ops_; + size_t total_read_ops_; + size_t total_sent_bytes_; + 
size_t total_received_bytes_; + size_t udp_data_sent_messages_; + size_t udp_data_received_messages_; + bool udp_socket_buffers_recorded_; + size_t udp_receive_buffer_actual_; + size_t udp_send_buffer_actual_; + }; + + class Proactor_Task : public ACE_Task + { + public: + Proactor_Task () + : proactor_ (0) + { + } + + ~Proactor_Task () + { + this->stop (); + } + + int init_backend (const Config &config) + { + if (this->proactor_ != 0) + return -1; + + size_t max_ops = config.max_aio_operations; + if (max_ops == 0) + { + const size_t read_ops_per_session = + config.transport == TRANSPORT_TCP ? 2 : 1; + const size_t write_ops_per_session = + config.transport == TRANSPORT_TCP ? config.write_depth * 2 + : config.write_depth; + + // The benchmark seeds all initial endpoints before the proactor + // threads start, so the implicit slot budget must cover that + // startup burst rather than only the steady-state write depth. + max_ops = + config.sessions * (read_ops_per_session + write_ops_per_session) + 32; + if (max_ops < 256) + max_ops = 256; + } + + if (Proactor_Test_Backend::create_proactor (config.backend, + max_ops, + this->proactor_, + true) != 0) + return -1; + + return 0; + } + + int start_threads (int thread_count) + { + if (this->proactor_ == 0) + return -1; + return this->activate (THR_NEW_LWP | THR_JOINABLE, thread_count); + } + + int stop (void) + { + ACE_Proactor *proactor = this->proactor_; + if (proactor == 0) + return 0; + + proactor->proactor_end_event_loop (); + this->wait (); + ACE_Proactor::close_singleton (); + this->proactor_ = 0; + return 0; + } + + virtual int svc (void) + { + return ACE_Proactor::instance ()->proactor_run_event_loop (); + } + + private: + ACE_Proactor *proactor_; + }; + + class Endpoint_Base : public ACE_Handler + { + public: + Endpoint_Base (Benchmark_State &state, + const Config &config, + size_t session_id, + const ACE_TCHAR *role) + : state_ (state) + , config_ (config) + , session_id_ (session_id) + , role_ (role) + , lock_ 
() + , handle_ (ACE_INVALID_HANDLE) + , failed_ (false) + , completed_ (false) + { + } + + virtual ~Endpoint_Base (void) + { + this->close_handle (); + } + + protected: + void close_handle (void) + { + if (this->handle_ != ACE_INVALID_HANDLE) + { + ACE_OS::closesocket (this->handle_); + this->handle_ = ACE_INVALID_HANDLE; + } + } + + void report_error (const ACE_TCHAR *where, + u_long error = 0) + { + if (!this->failed_) + { + this->failed_ = true; + this->state_.note_error (where, this->session_id_, this->role_, error); + } + } + + Benchmark_State &state_; + Config config_; + size_t session_id_; + const ACE_TCHAR *role_; + mutable ACE_Recursive_Thread_Mutex lock_; + ACE_HANDLE handle_; + bool failed_; + bool completed_; + }; + + class Stream_Endpoint : public Endpoint_Base + { + public: + Stream_Endpoint (Benchmark_State &state, + const Config &config, + size_t session_id, + const ACE_TCHAR *role) + : Endpoint_Base (state, config, session_id, role) + , rs_ () + , ws_ () + , sends_remaining_ (config.messages_per_endpoint) + , expected_receive_bytes_ (config.messages_per_endpoint * config.payload_size) + , received_bytes_ (0) + , writes_inflight_ (0) + , read_inflight_ (false) + , write_shutdown_ (false) + , read_complete_ (false) + { + } + + void start (ACE_HANDLE handle) + { + ACE_GUARD (ACE_Recursive_Thread_Mutex, guard, this->lock_); + this->handle_ = handle; + this->set_nodelay (); + + if (this->ws_.open (*this, this->handle_) != 0) + { + this->report_error (ACE_TEXT ("stream write open"), ACE_OS::last_error ()); + delete this; + return; + } + + if (this->rs_.open (*this, this->handle_) != 0) + { + this->report_error (ACE_TEXT ("stream read open"), ACE_OS::last_error ()); + delete this; + return; + } + + if (this->issue_read () == -1) + { + delete this; + return; + } + + this->issue_writes (); + if (this->maybe_finish ()) + delete this; + } + + virtual void handle_read_stream (const ACE_Asynch_Read_Stream::Result &result) + { + bool delete_self = false; + 
ACE_GUARD (ACE_Recursive_Thread_Mutex, guard, this->lock_); + ACE_Message_Block &mb = result.message_block (); + this->read_inflight_ = false; + + if (result.error () != 0) + { + mb.release (); + if (is_retryable_socket_error (result.error ())) + { + (void) this->issue_read (); + } + else if (!is_cancel_error (result.error ())) + this->report_error (ACE_TEXT ("stream read"), result.error ()); + delete_self = this->maybe_finish (); + if (delete_self) + { + guard.release (); + delete this; + } + return; + } + + if (result.bytes_transferred () == 0) + { + mb.release (); + if (this->received_bytes_ < this->expected_receive_bytes_) + this->report_error (ACE_TEXT ("stream unexpected eof")); + else + this->read_complete_ = true; + delete_self = this->maybe_finish (); + if (delete_self) + { + guard.release (); + delete this; + } + return; + } + + this->received_bytes_ += result.bytes_transferred (); + this->state_.note_read (result.bytes_transferred (), false); + mb.release (); + + if (this->received_bytes_ >= this->expected_receive_bytes_) + this->read_complete_ = true; + else + (void) this->issue_read (); + + delete_self = this->maybe_finish (); + if (delete_self) + { + guard.release (); + delete this; + } + } + + virtual void handle_write_stream (const ACE_Asynch_Write_Stream::Result &result) + { + bool delete_self = false; + ACE_GUARD (ACE_Recursive_Thread_Mutex, guard, this->lock_); + ACE_Message_Block &mb = result.message_block (); + + if (result.error () != 0) + { + if (is_retryable_socket_error (result.error ())) + { + if (this->ws_.write (mb, result.bytes_to_write ()) == 0) + return; + } + mb.release (); + if (!is_cancel_error (result.error ())) + this->report_error (ACE_TEXT ("stream write"), result.error ()); + if (this->writes_inflight_ > 0) + --this->writes_inflight_; + delete_self = this->maybe_finish (); + if (delete_self) + { + guard.release (); + delete this; + } + return; + } + + if (result.bytes_transferred () < result.bytes_to_write ()) + { + 
this->state_.note_write (result.bytes_transferred (), false); + if (this->ws_.write (mb, + result.bytes_to_write () - result.bytes_transferred ()) != 0) + { + mb.release (); + this->report_error (ACE_TEXT ("stream partial rewrite"), ACE_OS::last_error ()); + if (this->writes_inflight_ > 0) + --this->writes_inflight_; + } + return; + } + + this->state_.note_write (result.bytes_transferred (), false); + mb.release (); + if (this->writes_inflight_ > 0) + --this->writes_inflight_; + + this->issue_writes (); + delete_self = this->maybe_finish (); + if (delete_self) + { + guard.release (); + delete this; + } + } + + private: + void set_nodelay (void) + { + ACE_SOCK_Stream stream; + stream.set_handle (this->handle_); + int nodelay = 1; + (void) stream.set_option (ACE_IPPROTO_TCP, + TCP_NODELAY, + &nodelay, + sizeof (nodelay)); + stream.set_handle (ACE_INVALID_HANDLE); + } + + int issue_read (void) + { + if (this->failed_ || this->completed_ || this->read_inflight_) + return -1; + + const size_t read_size = + this->config_.payload_size * (this->config_.write_depth > 0 ? 
this->config_.write_depth : 1); + ACE_Message_Block *mb = 0; + ACE_NEW_RETURN (mb, ACE_Message_Block (read_size), -1); + + if (this->rs_.read (*mb, mb->space ()) == -1) + { + mb->release (); + this->report_error (ACE_TEXT ("stream read issue"), ACE_OS::last_error ()); + return -1; + } + + this->read_inflight_ = true; + return 0; + } + + void issue_writes (void) + { + while (!this->failed_ + && !this->completed_ + && this->sends_remaining_ > 0 + && this->writes_inflight_ < this->config_.write_depth) + { + ACE_Message_Block *mb = 0; + ACE_NEW (mb, ACE_Message_Block (this->config_.payload_size)); + if (mb == 0) + { + this->report_error (ACE_TEXT ("stream message_block alloc")); + return; + } + + ACE_OS::memset (mb->wr_ptr (), + static_cast ('A' + (this->session_id_ % 26)), + this->config_.payload_size); + mb->wr_ptr (this->config_.payload_size); + + if (this->ws_.write (*mb, this->config_.payload_size) == -1) + { + mb->release (); + this->report_error (ACE_TEXT ("stream write issue"), ACE_OS::last_error ()); + return; + } + + --this->sends_remaining_; + ++this->writes_inflight_; + } + } + + bool maybe_finish (void) + { + if (this->completed_) + return false; + + if (!this->write_shutdown_ + && this->sends_remaining_ == 0 + && this->writes_inflight_ == 0 + && this->handle_ != ACE_INVALID_HANDLE) + { + ACE_OS::shutdown (this->handle_, ACE_SHUTDOWN_WRITE); + this->write_shutdown_ = true; + } + + if (this->failed_) + { + if (this->writes_inflight_ == 0 && !this->read_inflight_) + { + this->completed_ = true; + this->state_.endpoint_done (); + return true; + } + return false; + } + + if (this->write_shutdown_ + && this->read_complete_ + && this->writes_inflight_ == 0 + && !this->read_inflight_) + { + this->completed_ = true; + this->state_.endpoint_done (); + return true; + } + return false; + } + + ACE_Asynch_Read_Stream rs_; + ACE_Asynch_Write_Stream ws_; + size_t sends_remaining_; + size_t expected_receive_bytes_; + size_t received_bytes_; + size_t writes_inflight_; + 
bool read_inflight_; + bool write_shutdown_; + bool read_complete_; + }; + + class Datagram_Endpoint : public Endpoint_Base + { + public: + Datagram_Endpoint (Benchmark_State &state, + const Config &config, + size_t session_id, + const ACE_TCHAR *role, + const ACE_INET_Addr &peer_addr, + bool send_enabled, + bool receive_enabled) + : Endpoint_Base (state, config, session_id, role) + , rs_ () + , ws_ () + , peer_addr_ (peer_addr) + , send_enabled_ (send_enabled) + , receive_enabled_ (receive_enabled) + , sends_remaining_ (config.messages_per_endpoint) + , end_markers_remaining_ (config.udp_end_markers) + , writes_inflight_ (0) + , read_inflight_ (false) + , peer_end_seen_ (false) + , sends_complete_ (false) + { + } + + void start (ACE_HANDLE handle) + { + ACE_GUARD (ACE_Recursive_Thread_Mutex, guard, this->lock_); + this->handle_ = handle; + + if (this->send_enabled_ && this->ws_.open (*this, this->handle_) != 0) + { + this->report_error (ACE_TEXT ("dgram write open"), ACE_OS::last_error ()); + delete this; + return; + } + + if (this->receive_enabled_ && this->rs_.open (*this, this->handle_) != 0) + { + this->report_error (ACE_TEXT ("dgram read open"), ACE_OS::last_error ()); + delete this; + return; + } + + if (this->receive_enabled_ && this->issue_read () == -1) + { + delete this; + return; + } + + if (this->send_enabled_) + this->issue_writes (); + else + { + this->sends_remaining_ = 0; + this->end_markers_remaining_ = 0; + this->sends_complete_ = true; + } + + if (this->maybe_finish ()) + delete this; + } + + virtual void handle_read_dgram (const ACE_Asynch_Read_Dgram::Result &result) + { + bool delete_self = false; + ACE_GUARD (ACE_Recursive_Thread_Mutex, guard, this->lock_); + ACE_Message_Block *mb = result.message_block (); + this->read_inflight_ = false; + + if (result.error () != 0) + { + mb->release (); + if (is_retryable_socket_error (result.error ())) + { + (void) this->issue_read (); + } + else if (!is_cancel_error (result.error ())) + 
this->report_error (ACE_TEXT ("dgram read"), result.error ()); + delete_self = this->maybe_finish (); + if (delete_self) + { + guard.release (); + delete this; + } + return; + } + + if (result.bytes_transferred () < sizeof (Udp_Header)) + { + mb->release (); + this->report_error (ACE_TEXT ("dgram short header")); + delete_self = this->maybe_finish (); + if (delete_self) + { + guard.release (); + delete this; + } + return; + } + + const Udp_Header *header = + reinterpret_cast (mb->rd_ptr ()); + + if (header->magic_ != UDP_MAGIC || header->session_ != this->session_id_) + { + mb->release (); + this->report_error (ACE_TEXT ("dgram invalid header")); + delete_self = this->maybe_finish (); + if (delete_self) + { + guard.release (); + delete this; + } + return; + } + + if (header->type_ == UDP_DATA) + this->state_.note_read (result.bytes_transferred (), true); + else if (header->type_ == UDP_END) + { + this->peer_end_seen_ = true; + this->state_.note_read (result.bytes_transferred (), false); + } + else + { + mb->release (); + this->report_error (ACE_TEXT ("dgram invalid type")); + delete_self = this->maybe_finish (); + if (delete_self) + { + guard.release (); + delete this; + } + return; + } + + mb->release (); + + if (this->receive_enabled_ && !this->peer_end_seen_) + (void) this->issue_read (); + + delete_self = this->maybe_finish (); + if (delete_self) + { + guard.release (); + delete this; + } + } + + virtual void handle_write_dgram (const ACE_Asynch_Write_Dgram::Result &result) + { + bool delete_self = false; + ACE_GUARD (ACE_Recursive_Thread_Mutex, guard, this->lock_); + ACE_Message_Block *mb = result.message_block (); + const bool data_message = mb->msg_type () == ACE_Message_Block::MB_DATA; + + if (result.error () != 0) + { + if (is_retryable_socket_error (result.error ())) + { + size_t bytes_to_send = result.bytes_to_write (); + if (this->ws_.send (mb, + bytes_to_send, + 0, + this->peer_addr_) == 0) + return; + } + const bool ignore_peer_closed = + 
!data_message && is_udp_peer_closed_error (result.error ()); + mb->release (); + if (ignore_peer_closed) + { + this->end_markers_remaining_ = 0; + this->sends_complete_ = true; + } + else if (!is_cancel_error (result.error ())) + this->report_error (ACE_TEXT ("dgram write"), result.error ()); + if (this->writes_inflight_ > 0) + --this->writes_inflight_; + delete_self = this->maybe_finish (); + if (delete_self) + { + guard.release (); + delete this; + } + return; + } + + this->state_.note_write (result.bytes_transferred (), data_message); + mb->release (); + + if (this->writes_inflight_ > 0) + --this->writes_inflight_; + + this->issue_writes (); + delete_self = this->maybe_finish (); + if (delete_self) + { + guard.release (); + delete this; + } + } + + private: + int issue_read (void) + { + if (this->failed_ || this->completed_ || this->read_inflight_) + return -1; + + ACE_Message_Block *mb = 0; + ACE_NEW_RETURN (mb, ACE_Message_Block (this->config_.payload_size), -1); + size_t bytes_received = 0; + if (this->rs_.recv (mb, + bytes_received, + 0, + this->config_.family) == -1) + { + mb->release (); + this->report_error (ACE_TEXT ("dgram recv issue"), ACE_OS::last_error ()); + return -1; + } + + this->read_inflight_ = true; + return 0; + } + + void issue_writes (void) + { + while (this->send_enabled_ + && !this->failed_ + && !this->completed_ + && this->writes_inflight_ < this->config_.write_depth) + { + ACE_UINT32 type = 0; + if (this->sends_remaining_ > 0) + { + type = UDP_DATA; + --this->sends_remaining_; + } + else if (this->end_markers_remaining_ > 0) + { + type = UDP_END; + --this->end_markers_remaining_; + } + else + { + this->sends_complete_ = true; + return; + } + + ACE_Message_Block *mb = 0; + ACE_NEW (mb, ACE_Message_Block (this->config_.payload_size)); + if (mb == 0) + { + this->report_error (ACE_TEXT ("dgram message_block alloc")); + return; + } + + ACE_OS::memset (mb->wr_ptr (), 0, this->config_.payload_size); + Udp_Header *header = reinterpret_cast 
(mb->wr_ptr ()); + header->magic_ = UDP_MAGIC; + header->type_ = type; + header->session_ = static_cast (this->session_id_); + header->reserved_ = 0; + mb->msg_type (type == UDP_DATA + ? ACE_Message_Block::MB_DATA + : ACE_Message_Block::MB_PROTO); + + if (type == UDP_DATA && this->config_.payload_size > sizeof (Udp_Header)) + ACE_OS::memset (mb->wr_ptr () + sizeof (Udp_Header), + static_cast ('a' + (this->session_id_ % 26)), + this->config_.payload_size - sizeof (Udp_Header)); + + mb->wr_ptr (this->config_.payload_size); + + if (this->ws_.send (mb, this->config_.payload_size, 0, this->peer_addr_) == -1) + { + mb->release (); + this->report_error (ACE_TEXT ("dgram send issue"), ACE_OS::last_error ()); + return; + } + + ++this->writes_inflight_; + } + } + + bool maybe_finish (void) + { + if (this->completed_) + return false; + + if (this->failed_) + { + if (this->writes_inflight_ == 0 && !this->read_inflight_) + { + this->completed_ = true; + this->state_.endpoint_done (); + return true; + } + return false; + } + + if (this->sends_remaining_ == 0 && this->end_markers_remaining_ == 0) + this->sends_complete_ = true; + + const bool writes_done = + !this->send_enabled_ || (this->sends_complete_ && this->writes_inflight_ == 0); + const bool reads_done = + !this->receive_enabled_ || (this->peer_end_seen_ && !this->read_inflight_); + + if (writes_done && reads_done) + { + this->completed_ = true; + this->state_.endpoint_done (); + return true; + } + return false; + } + + ACE_Asynch_Read_Dgram rs_; + ACE_Asynch_Write_Dgram ws_; + ACE_INET_Addr peer_addr_; + bool send_enabled_; + bool receive_enabled_; + size_t sends_remaining_; + size_t end_markers_remaining_; + size_t writes_inflight_; + bool read_inflight_; + bool peer_end_seen_; + bool sends_complete_; + }; + + int + print_usage (ACE_TCHAR *argv0) + { + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("Usage: %s [-t ] [-u] [-4|-6] ") + ACE_TEXT ("[-n sessions] [-m messages] [-b payload] ") + ACE_TEXT ("[-w write_depth] [-T threads] [-a 
max_aio_ops] [-p port] ") + ACE_TEXT ("[-R udp_rcvbuf] [-S udp_sndbuf]\n"), + argv0)); + ACE_ERROR ((LM_ERROR, + ACE_TEXT (" default transport is tcp, default family is ipv4\n"))); + return Proactor_Test_Backend::print_type_usage (argv0); + } + + int + parse_args (int argc, + ACE_TCHAR *argv[], + Config &config) + { + ACE_Get_Opt get_opt (argc, argv, ACE_TEXT ("46uR:S:a:b:m:n:p:t:T:w:")); + int c = 0; + + while ((c = get_opt ()) != EOF) + { + switch (c) + { + case '4': + config.family = AF_INET; + break; + case '6': + config.family = AF_INET6; + break; + case 'u': + config.transport = TRANSPORT_UDP; + break; + case 'R': + if (parse_size_arg (get_opt.opt_arg (), + config.udp_receive_buffer) != 0) + return -1; + break; + case 'S': + if (parse_size_arg (get_opt.opt_arg (), + config.udp_send_buffer) != 0) + return -1; + break; + case 'a': + if (parse_size_arg (get_opt.opt_arg (), + config.max_aio_operations) != 0) + return -1; + break; + case 'b': + if (parse_size_arg (get_opt.opt_arg (), + config.payload_size) != 0) + return -1; + break; + case 'm': + if (parse_size_arg (get_opt.opt_arg (), + config.messages_per_endpoint) != 0) + return -1; + break; + case 'n': + if (parse_size_arg (get_opt.opt_arg (), + config.sessions) != 0) + return -1; + break; + case 'p': + if (parse_u_short_arg (get_opt.opt_arg (), + config.listen_port) != 0) + return -1; + break; + case 't': + if (Proactor_Test_Backend::parse_type (get_opt.opt_arg (), config.backend) != 0 + || !Proactor_Test_Backend::is_available (config.backend)) + return -1; + break; + case 'T': + if (parse_int_arg (get_opt.opt_arg (), + config.thread_count) != 0) + return -1; + break; + case 'w': + if (parse_size_arg (get_opt.opt_arg (), + config.write_depth) != 0) + return -1; + break; + default: + return -1; + } + } + + if (config.sessions == 0 + || config.messages_per_endpoint == 0 + || config.payload_size == 0 + || config.write_depth == 0 + || config.thread_count <= 0) + return -1; + + if (config.transport == 
TRANSPORT_UDP && config.payload_size < sizeof (Udp_Header)) + return -1; + + if (config.transport != TRANSPORT_UDP + && (config.udp_receive_buffer != 0 || config.udp_send_buffer != 0)) + return -1; + + return 0; + } + + int + parse_unsigned_long_arg (const ACE_TCHAR *text, + unsigned long &value) + { + if (text == 0 || *text == 0) + return -1; + + for (const ACE_TCHAR *pos = text; *pos != 0; ++pos) + if (*pos < static_cast ('0') + || *pos > static_cast ('9')) + return -1; + + ACE_TCHAR *end = 0; + errno = 0; + const unsigned long parsed = ACE_OS::strtoul (text, &end, 10); + if (errno != 0 || end == text || (end != 0 && *end != 0)) + return -1; + + value = parsed; + return 0; + } + + int + parse_size_arg (const ACE_TCHAR *text, + size_t &value) + { + unsigned long parsed = 0; + if (parse_unsigned_long_arg (text, parsed) != 0) + return -1; + + size_t const converted = static_cast (parsed); + if (static_cast (converted) != parsed) + return -1; + + value = converted; + return 0; + } + + int + parse_u_short_arg (const ACE_TCHAR *text, + u_short &value) + { + unsigned long parsed = 0; + if (parse_unsigned_long_arg (text, parsed) != 0 + || parsed > USHRT_MAX) + return -1; + + value = static_cast (parsed); + return 0; + } + + int + parse_int_arg (const ACE_TCHAR *text, + int &value) + { + unsigned long parsed = 0; + if (parse_unsigned_long_arg (text, parsed) != 0 + || parsed > INT_MAX) + return -1; + + value = static_cast (parsed); + return 0; + } + + ACE_INET_Addr + loopback_addr (u_short port, + int family) + { + const ACE_TCHAR *host = + family == AF_INET6 ? 
ACE_TEXT ("::1") : ACE_TEXT ("127.0.0.1"); + return ACE_INET_Addr (port, host, family); + } + + int + run_tcp_benchmark (const Config &config) + { + Proactor_Task task; + if (task.init_backend (config) != 0) + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("Failed to start proactor backend %s\n"), + Proactor_Test_Backend::name (config.backend)), + -1); + + Benchmark_State state (config, config.sessions * 2); + ACE_SOCK_Acceptor acceptor; + ACE_INET_Addr listen_addr = loopback_addr (config.listen_port, config.family); + if (acceptor.open (listen_addr, 1) != 0) + { + task.stop (); + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("%p\n"), + ACE_TEXT ("acceptor.open")), + -1); + } + + if (acceptor.get_local_addr (listen_addr) != 0) + { + task.stop (); + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("%p\n"), + ACE_TEXT ("acceptor.get_local_addr")), + -1); + } + + for (size_t index = 0; index < config.sessions; ++index) + { + ACE_SOCK_Connector connector; + ACE_SOCK_Stream client_stream; + ACE_SOCK_Stream server_stream; + + if (connector.connect (client_stream, listen_addr) != 0) + { + task.stop (); + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("%p\n"), + ACE_TEXT ("connector.connect")), + -1); + } + + if (acceptor.accept (server_stream) != 0) + { + task.stop (); + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("%p\n"), + ACE_TEXT ("acceptor.accept")), + -1); + } + + const ACE_HANDLE client_handle = client_stream.get_handle (); + const ACE_HANDLE server_handle = server_stream.get_handle (); + client_stream.set_handle (ACE_INVALID_HANDLE); + server_stream.set_handle (ACE_INVALID_HANDLE); + + if (prepare_socket_handle (client_handle) != 0 + || prepare_socket_handle (server_handle) != 0) + { + ACE_OS::closesocket (client_handle); + ACE_OS::closesocket (server_handle); + task.stop (); + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("%p\n"), + ACE_TEXT ("set_flags")), + -1); + } + + Stream_Endpoint *client = 0; + Stream_Endpoint *server = 0; + ACE_NEW_RETURN (client, + Stream_Endpoint (state, config, 
index, ACE_TEXT ("client")), + -1); + ACE_NEW_RETURN (server, + Stream_Endpoint (state, config, index, ACE_TEXT ("server")), + -1); + client->start (client_handle); + server->start (server_handle); + } + + if (task.start_threads (config.thread_count) != 0) + { + task.stop (); + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("%p\n"), + ACE_TEXT ("activate")), + -1); + } + + const bool completed = state.wait_for_completion (); + task.stop (); + return completed ? state.report () : state.report (); + } + + int + prepare_socket_handle (ACE_HANDLE handle) + { + return ACE::set_flags (handle, ACE_NONBLOCK); + } + + int + configure_udp_socket_buffer (ACE_HANDLE handle, + int option_name, + size_t requested_size, + size_t &actual_size) + { + actual_size = 0; + +#if defined (ACE_LACKS_SO_RCVBUF) + if (option_name == SO_RCVBUF) + { + errno = ENOTSUP; + return -1; + } +#endif /* ACE_LACKS_SO_RCVBUF */ + +#if defined (ACE_LACKS_SO_SNDBUF) + if (option_name == SO_SNDBUF) + { + errno = ENOTSUP; + return -1; + } +#endif /* ACE_LACKS_SO_SNDBUF */ + + if (requested_size > 0) + { + if (requested_size > static_cast (INT_MAX)) + { + errno = EINVAL; + return -1; + } + + const int requested = static_cast (requested_size); + if (ACE_OS::setsockopt (handle, + SOL_SOCKET, + option_name, + reinterpret_cast (&requested), + sizeof (requested)) != 0) + return -1; + } + + int actual = 0; + int actual_length = static_cast (sizeof (actual)); + if (ACE_OS::getsockopt (handle, + SOL_SOCKET, + option_name, + reinterpret_cast (&actual), + &actual_length) != 0) + return -1; + + actual_size = actual > 0 ? 
static_cast (actual) : 0; + return 0; + } + + int + configure_udp_socket_buffers (ACE_HANDLE handle, + const Config &config, + size_t &actual_receive_size, + size_t &actual_send_size) + { + if (configure_udp_socket_buffer (handle, + SO_RCVBUF, + config.udp_receive_buffer, + actual_receive_size) != 0) + return -1; + + if (configure_udp_socket_buffer (handle, + SO_SNDBUF, + config.udp_send_buffer, + actual_send_size) != 0) + return -1; + + return 0; + } + + int + run_udp_benchmark (const Config &config) + { + Proactor_Task task; + if (task.init_backend (config) != 0) + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("Failed to start proactor backend %s\n"), + Proactor_Test_Backend::name (config.backend)), + -1); + + Benchmark_State state (config, config.sessions * 2); + + for (size_t index = 0; index < config.sessions; ++index) + { + ACE_INET_Addr server_addr = loopback_addr (0, config.family); + ACE_INET_Addr client_addr = loopback_addr (0, config.family); + ACE_HANDLE server_handle = ACE_INVALID_HANDLE; + ACE_HANDLE client_handle = ACE_INVALID_HANDLE; + + ACE_SOCK_Dgram server_socket; +#if defined (ACE_WIN32) + ACE_SOCK_Dgram client_socket; +#else + ACE_SOCK_CODgram client_socket; +#endif /* ACE_WIN32 */ + + if (server_socket.open (server_addr, config.family) != 0) + { + task.stop (); + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("%p\n"), + ACE_TEXT ("server_socket.open")), + -1); + } + + if (server_socket.get_local_addr (server_addr) != 0) + { + task.stop (); + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("%p\n"), + ACE_TEXT ("server_socket.get_local_addr")), + -1); + } + + // Win32 async datagram sends use the per-operation peer address. + // POSIX expects the connected datagram socket behavior used + // originally by this benchmark. 
+#if defined (ACE_WIN32) + if (client_socket.open (client_addr, config.family) != 0) +#else + if (client_socket.open (server_addr, + client_addr, + config.family) != 0) +#endif /* ACE_WIN32 */ + { + task.stop (); + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("%p\n"), + ACE_TEXT ("client_socket.open")), + -1); + } + + if (client_socket.get_local_addr (client_addr) != 0) + { + task.stop (); + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("%p\n"), + ACE_TEXT ("client_socket.get_local_addr")), + -1); + } + + server_handle = server_socket.get_handle (); + client_handle = client_socket.get_handle (); + server_socket.set_handle (ACE_INVALID_HANDLE); + client_socket.set_handle (ACE_INVALID_HANDLE); + + size_t server_receive_buffer = 0; + size_t server_send_buffer = 0; + size_t client_receive_buffer = 0; + size_t client_send_buffer = 0; + if (configure_udp_socket_buffers (server_handle, + config, + server_receive_buffer, + server_send_buffer) != 0 + || configure_udp_socket_buffers (client_handle, + config, + client_receive_buffer, + client_send_buffer) != 0 + || prepare_socket_handle (server_handle) != 0 + || prepare_socket_handle (client_handle) != 0) + { + ACE_OS::closesocket (server_handle); + ACE_OS::closesocket (client_handle); + task.stop (); + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("%p\n"), + ACE_TEXT ("udp socket setup")), + -1); + } + + state.note_udp_socket_buffers (server_receive_buffer, + client_send_buffer); + + Datagram_Endpoint *client = 0; + Datagram_Endpoint *server = 0; + ACE_NEW_RETURN (client, + Datagram_Endpoint (state, + config, + index, + ACE_TEXT ("client"), + server_addr, + true, + false), + -1); + ACE_NEW_RETURN (server, + Datagram_Endpoint (state, + config, + index, + ACE_TEXT ("server"), + client_addr, + false, + true), + -1); + client->start (client_handle); + server->start (server_handle); + } + + if (task.start_threads (config.thread_count) != 0) + { + task.stop (); + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("%p\n"), + ACE_TEXT ("activate")), + -1); 
+ } + + const bool completed = state.wait_for_completion (); + task.stop (); + return completed ? state.report () : state.report (); + } +} + +int +run_main (int argc, ACE_TCHAR *argv[]) +{ + ACE_START_TEST (ACE_TEXT ("Proactor_Network_Performance_Test")); + + Config config; + if (parse_args (argc, argv, config) != 0) + { + const int rc = print_usage (argv[0]); + ACE_END_TEST; + return rc; + } + + if (config.family == AF_INET6 && !ACE::ipv6_enabled ()) + { + ACE_DEBUG ((LM_INFO, + ACE_TEXT ("IPv6 is not supported by ACE on this platform.\n") + ACE_TEXT ("Proactor_Network_Performance_Test (IPv6) will not be run.\n"))); + ACE_END_TEST; + return 0; + } + + int rc = 0; + if (config.transport == TRANSPORT_TCP) + rc = run_tcp_benchmark (config); + else + rc = run_udp_benchmark (config); + + ACE_END_TEST; + return rc; +} + +#else + +int +run_main (int, ACE_TCHAR *[]) +{ + ACE_START_TEST (ACE_TEXT ("Proactor_Network_Performance_Test")); + ACE_ERROR ((LM_INFO, + ACE_TEXT ("Threaded Proactor networking is not supported on this platform\n"))); + ACE_END_TEST; + return 0; +} + +#endif /* ACE_HAS_THREADS && (ACE_HAS_WIN32_OVERLAPPED_IO || ACE_HAS_AIO_CALLS) */ diff --git a/ACE/tests/Proactor_Scatter_Gather_Test.cpp b/ACE/tests/Proactor_Scatter_Gather_Test.cpp index d0e75e84e25e2..797b3d7ecd69d 100644 --- a/ACE/tests/Proactor_Scatter_Gather_Test.cpp +++ b/ACE/tests/Proactor_Scatter_Gather_Test.cpp @@ -26,9 +26,8 @@ #include "test_config.h" -#if defined (ACE_HAS_WIN32_OVERLAPPED_IO) - // This currently only works on Win32 platforms (NT SP2 and above). - // Support for Unix platforms supporting POSIX aio calls should be added in future. +#if defined (ACE_HAS_WIN32_OVERLAPPED_IO) || defined (ACE_HAS_AIO_CALLS) + // This requires an async Proactor backend with vectored stream/file I/O. 
#include "ace/Get_Opt.h" @@ -44,6 +43,8 @@ #include "ace/SOCK_Connector.h" +#include "Proactor_Test_Backend.h" + // For the Acceptor/Connector handlers maintenance lists static const int SENDERS = 1; static const int RECEIVERS = 2; @@ -62,6 +63,7 @@ static const ACE_TCHAR *output_file = ACE_TEXT("output"); static int client_only = 0; static int server_only = 0; static size_t chunk_size = 0; +static Proactor_Test_Backend::Type backend = Proactor_Test_Backend::BACKEND_DEFAULT; enum { @@ -129,6 +131,26 @@ free_chunks_chain (ACE_Message_Block *&mb) mb = 0; } +static size_t +output_write_size (ACE_Message_Block *mb) +{ +#if defined (ACE_WIN32) + return mb->total_size (); +#else + return mb->total_length (); +#endif /* ACE_WIN32 */ +} + +static int +output_write_uses_padding (void) +{ +#if defined (ACE_WIN32) + return 1; +#else + return 0; +#endif /* ACE_WIN32 */ +} + static int last_chunk (ACE_Message_Block *chain, ACE_Message_Block *&last) @@ -339,6 +361,7 @@ class Writer : public ACE_Handler private: int initiate_write_file (void); + void fail_and_end_event_loop (void); private: // Output file @@ -350,6 +373,7 @@ class Writer : public ACE_Handler ACE_Message_Block *even_chain_; long io_count_; char receiver_count_; + bool failed_; }; // ************************************************************* @@ -395,7 +419,8 @@ Receiver::~Receiver (void) if (this->socket_handle_ != ACE_INVALID_HANDLE) ACE_OS::closesocket (this->socket_handle_); - Receiver::writer_->on_delete_receiver (); + if (Receiver::writer_ != 0) + Receiver::writer_->on_delete_receiver (); if (this->partial_chunk_) { @@ -589,11 +614,17 @@ Acceptor::stop (void) { // This method can be called only after proactor event loop is done // in all threads. 
+ this->reissue_accept (0); + (void) this->cancel (); + for (int i = 0; i < RECEIVERS; ++i) { delete this->list_receivers_[i]; this->list_receivers_[i] = 0; } + + delete Receiver::writer_; + Receiver::writer_ = 0; } void @@ -651,7 +682,8 @@ Writer::Writer (void) odd_chain_ (0), even_chain_ (0), io_count_ (0), - receiver_count_ (0) + receiver_count_ (0), + failed_ (false) { } @@ -663,6 +695,12 @@ Writer::~Writer (void) if (this->output_file_handle_ != ACE_INVALID_HANDLE) ACE_OS::close (this->output_file_handle_); + if (this->odd_chain_ != 0) + free_chunks_chain (this->odd_chain_); + + if (this->even_chain_ != 0) + free_chunks_chain (this->even_chain_); + Receiver::writer_ = 0; } @@ -683,23 +721,34 @@ Writer::on_delete_receiver () --this->receiver_count_; + if (this->failed_) + return; + if (0 == this->receiver_count_) { if (this->io_count_ <= 0) // no pending io, so do the work oursleves // (if pending io, they'll see the zero receiver count) - this->initiate_write_file (); + if (this->initiate_write_file () != 0) + this->fail_and_end_event_loop (); } } void Writer::open (void) { +#if defined (ACE_WIN32) + int output_open_flags = O_CREAT | _O_TRUNC | _O_WRONLY; +#else + int output_open_flags = O_CREAT | O_TRUNC | O_WRONLY; +#endif /* ACE_WIN32 */ +#if defined (ACE_WIN32) + output_open_flags |= FILE_FLAG_OVERLAPPED | FILE_FLAG_NO_BUFFERING; +#endif /* ACE_WIN32 */ + // Open the file for output if (ACE_INVALID_HANDLE == (this->output_file_handle_ = ACE_OS::open (output_file, - O_CREAT | _O_TRUNC | _O_WRONLY |\ - FILE_FLAG_OVERLAPPED |\ - FILE_FLAG_NO_BUFFERING, + output_open_flags, ACE_DEFAULT_FILE_PERMS))) ACE_ERROR ((LM_ERROR, ACE_TEXT ("%p\n"), @@ -715,6 +764,12 @@ int Writer::handle_read_chunks_chain (ACE_Message_Block *mb, int type) { + if (this->failed_) + { + free_chunks_chain (mb); + return -1; + } + ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("Writer::handle_read_chunks_chain - (%s) %d bytes\n"), (type == ODD) ? 
ACE_TEXT ("ODD ") : ACE_TEXT ("EVEN"), @@ -722,7 +777,11 @@ Writer::handle_read_chunks_chain (ACE_Message_Block *mb, add_to_chunks_chain (ODD == type ? this->odd_chain_ : this->even_chain_, mb); - this->initiate_write_file (); + if (this->initiate_write_file () != 0) + { + this->fail_and_end_event_loop (); + return -1; + } return 0; } @@ -730,6 +789,9 @@ Writer::handle_read_chunks_chain (ACE_Message_Block *mb, int Writer::initiate_write_file (void) { + if (this->failed_) + return -1; + // find out how much can we merge ACE_Message_Block *dummy_last = 0; size_t odd_count = last_chunk (this->odd_chain_, dummy_last); @@ -808,17 +870,18 @@ Writer::initiate_write_file (void) // update the remainders of the chains this->odd_chain_ = new_odd_chain_head; this->even_chain_ = new_even_chain_head; - size_t increment_writing_file_offset = united_mb->total_length (); + size_t increment_writing_file_offset = output_write_size (united_mb); + size_t logical_write_size = united_mb->total_length (); // Reconstruct the file - // Write the size, not the length, because we must write in chunks - // of ACE_DEBUG ((LM_DEBUG, - ACE_TEXT ("Writer::initiate_write_file: write %d bytes at %d\n"), - united_mb->total_size (), - this->writing_file_offset_)); + ACE_TEXT ("Writer::initiate_write_file: write %d bytes at %d") + ACE_TEXT (" (logical %d)\n"), + increment_writing_file_offset, + this->writing_file_offset_, + logical_write_size)); if (this->wf_.writev (*united_mb, - united_mb->total_size (), + increment_writing_file_offset, this->writing_file_offset_) == -1) { free_chunks_chain (united_mb); @@ -838,25 +901,70 @@ Writer::initiate_write_file (void) return 0; } +void +Writer::fail_and_end_event_loop (void) +{ + this->failed_ = true; + ACE_Proactor::instance ()->end_event_loop (); +} + void Writer::handle_write_file (const ACE_Asynch_Write_File::Result &result) { ACE_Message_Block *mb = &result.message_block (); + size_t bytes_transferred = result.bytes_transferred (); + size_t 
bytes_requested = result.bytes_to_write (); ACE_DEBUG ((LM_DEBUG, - ACE_TEXT ("Writer::handle_write_file at offset %d wrote %d\n"), - this->reported_file_offset_, - result.bytes_transferred ())); + ACE_TEXT ("Writer::handle_write_file at offset %d wrote %d") + ACE_TEXT (" of %d bytes\n"), + result.offset (), + bytes_transferred, + bytes_requested)); + + if (result.error () != 0) + { + free_chunks_chain (mb); + --this->io_count_; + + ACE_DEBUG ((LM_DEBUG, + ACE_TEXT ("Writer::handle_write_file") + ACE_TEXT (" - ending proactor event loop after write failure\n"))); + + this->fail_and_end_event_loop (); + return; + } this->reported_file_offset_ += - static_cast (result.bytes_transferred ()); + static_cast (bytes_transferred); + + if (bytes_transferred < bytes_requested) + { + size_t retry_bytes = output_write_uses_padding () + ? bytes_requested - bytes_transferred + : mb->total_length (); + u_long retry_offset = + result.offset () + static_cast (bytes_transferred); + + if (this->wf_.writev (*mb, retry_bytes, retry_offset) == -1) + { + free_chunks_chain (mb); + --this->io_count_; + + ACE_DEBUG ((LM_DEBUG, + ACE_TEXT ("Writer::handle_write_file") + ACE_TEXT (" - ending proactor event loop after retry failure\n"))); + + this->fail_and_end_event_loop (); + } + return; + } - // Always truncate as required, - // because partial will always be the last write to a file ACE_Message_Block *last_mb = mb; last_chunk (mb, last_mb); - if (last_mb->space ()) + if (output_write_uses_padding () && + last_mb->space ()) ACE_OS::truncate (output_file, this->reported_file_offset_ - static_cast (last_mb->space ())); @@ -869,7 +977,18 @@ Writer::handle_write_file (const ACE_Asynch_Write_File::Result &result) if (0 == this->receiver_count_ && 0 == this->io_count_) { - ACE_TEST_ASSERT (0 == this->odd_chain_ && 0 == this->even_chain_); + if (0 != this->odd_chain_ || 0 != this->even_chain_) + { + if (this->initiate_write_file () != 0) + { + ACE_DEBUG ((LM_DEBUG, + ACE_TEXT 
("Writer::handle_write_file") + ACE_TEXT (" - ending proactor event loop after final flush failure\n"))); + + this->fail_and_end_event_loop (); + } + return; + } ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("Writer::handle_write_file") @@ -992,6 +1111,7 @@ Connector::stop (void) { // This method can be called only after proactor event loop is done // in all threads. + (void) this->cancel (); for (int i = 0; i < SENDERS; ++i) { @@ -1095,12 +1215,19 @@ Sender::open (ACE_HANDLE handle, ACE_Message_Block &) { this->socket_handle_[ODD] = handle; +#if defined (ACE_WIN32) + int input_open_flags = _O_RDONLY; +#else + int input_open_flags = O_RDONLY; +#endif /* ACE_WIN32 */ +#if defined (ACE_WIN32) + input_open_flags |= FILE_FLAG_OVERLAPPED | FILE_FLAG_NO_BUFFERING; +#endif /* ACE_WIN32 */ + // Open the input file if (ACE_INVALID_HANDLE == (this->input_file_handle_ = ACE_OS::open (input_file, - _O_RDONLY |\ - FILE_FLAG_OVERLAPPED |\ - FILE_FLAG_NO_BUFFERING, + input_open_flags, ACE_DEFAULT_FILE_PERMS))) { ACE_ERROR ((LM_ERROR, @@ -1315,6 +1442,7 @@ print_usage (int /* argc */, ACE_TCHAR *argv[]) ACE_TEXT ("\n-s server only (receiver-writer)") ACE_TEXT ("\n-h host to connect to") ACE_TEXT ("\n-p port") + ACE_TEXT ("\n-t select proactor backend") ACE_TEXT ("\n-u show this message") ACE_TEXT ("\n"), argv[0] @@ -1328,7 +1456,7 @@ parse_args (int argc, ACE_TCHAR *argv[]) if (argc == 1) // no arguments , so one button test return 0; - ACE_Get_Opt get_opt (argc, argv, ACE_TEXT ("f:csh:p:u")); + ACE_Get_Opt get_opt (argc, argv, ACE_TEXT ("f:csh:p:t:u")); int c; while ((c = get_opt ()) != EOF) @@ -1352,6 +1480,10 @@ parse_args (int argc, ACE_TCHAR *argv[]) case 'p': port = ACE_OS::atoi (get_opt.opt_arg ()); break; + case 't': + if (Proactor_Test_Backend::parse_type (get_opt.opt_arg (), backend) != 0) + return print_usage (argc, argv); + break; case 'u': default: return print_usage (argc, argv); @@ -1369,6 +1501,25 @@ run_main (int argc, ACE_TCHAR *argv[]) if (::parse_args (argc, argv) == -1) 
return -1; + if (!Proactor_Test_Backend::supports_scatter_gather (backend)) + { + ACE_DEBUG ((LM_INFO, + ACE_TEXT ("Asynchronous Scatter/Gather IO is unsupported ") + ACE_TEXT ("for backend '%s'.\n") + ACE_TEXT ("Proactor_Scatter_Gather_Test will not be run.\n"), + Proactor_Test_Backend::name ( + Proactor_Test_Backend::concrete_type (backend)))); + ACE_END_TEST; + return 0; + } + + ACE_Proactor *proactor = 0; + if (Proactor_Test_Backend::create_proactor (backend, 128, proactor, true) != 0) + { + ACE_END_TEST; + return -1; + } + chunk_size = ACE_OS::getpagesize (); if (client_only) @@ -1384,48 +1535,79 @@ run_main (int argc, ACE_TCHAR *argv[]) ACE_TEXT ("Running as server and client, page size %d\n"), chunk_size)); - Acceptor acceptor; - Connector connector; - ACE_INET_Addr addr (port); + int run_status = 0; + { + Acceptor acceptor; + Connector connector; + ACE_INET_Addr addr (port); - if (!client_only) - { - // Simplify, initial read with zero size - if (-1 == acceptor.open (addr, 0, 1)) - { - ACE_TEST_ASSERT (0); - return -1; - } - } + if (!client_only) + { + // This test needs exactly two accepted sockets. Avoid default + // reissue/backlog behavior so teardown does not leave surplus + // asynchronous accepts outstanding on Win32. 
+ if (-1 == acceptor.open (addr, + 0, + 1, + RECEIVERS, + 1, + ACE_Proactor::instance (), + false, + 0, + RECEIVERS)) + { + ACE_TEST_ASSERT (0); + run_status = -1; + } + } - if (!server_only) - { - if (-1 == connector.open (1, ACE_Proactor::instance ())) - { - ACE_TEST_ASSERT (0); - return -1; - } + if (0 == run_status && + !server_only) + { + if (-1 == connector.open (1, ACE_Proactor::instance ())) + { + ACE_TEST_ASSERT (0); + run_status = -1; + } - // connect to first destination - if (addr.set (port, host, 1, addr.get_type ()) == -1) - ACE_ERROR_RETURN ((LM_ERROR, ACE_TEXT ("%p\n"), host), -1); - connector.set_address (addr); - if (-1 == connector.connect (addr)) - { - ACE_TEST_ASSERT (0); - return -1; - } - } + // connect to first destination + if (0 == run_status && + addr.set (port, host, 1, addr.get_type ()) == -1) + { + ACE_ERROR ((LM_ERROR, ACE_TEXT ("%p\n"), host)); + run_status = -1; + } + + if (0 == run_status) + { + connector.set_address (addr); + if (-1 == connector.connect (addr)) + { + ACE_TEST_ASSERT (0); + run_status = -1; + } + } + } - ACE_Proactor::instance ()->run_event_loop (); + if (0 == run_status) + ACE_Proactor::instance ()->run_event_loop (); - // As Proactor event loop now is inactive it is safe to destroy all - // senders + // As Proactor event loop now is inactive it is safe to destroy all + // senders and acceptors before tearing the proactor down. 
+ connector.stop (); + acceptor.stop (); + } - connector.stop (); - acceptor.stop (); + ACE_Proactor::close_singleton (); - ACE_Proactor::instance()->close_singleton (); + if (run_status != 0) + { + if (!client_only) + ACE_OS::unlink (output_file); + + ACE_END_TEST; + return -1; + } // now compare the files - available only when on same machine @@ -1487,4 +1669,4 @@ run_main (int, ACE_TCHAR *[]) return 0; } -#endif /* ACE_HAS_WIN32_OVERLAPPED_IO */ +#endif /* ACE_HAS_WIN32_OVERLAPPED_IO || ACE_HAS_AIO_CALLS */ diff --git a/ACE/tests/Proactor_Stress_Test.cpp b/ACE/tests/Proactor_Stress_Test.cpp new file mode 100644 index 0000000000000..fd38e1d824bdf --- /dev/null +++ b/ACE/tests/Proactor_Stress_Test.cpp @@ -0,0 +1,374 @@ +// ============================================================================ +/** + * @file Proactor_Stress_Test.cpp + * + * Stress coverage for recursive timer-driven dispatch patterns across + * selectable ACE Proactor backends. + */ +// ============================================================================ + +#include "test_config.h" + +#if defined (ACE_HAS_THREADS) && (defined (ACE_HAS_WIN32_OVERLAPPED_IO) || defined (ACE_HAS_AIO_CALLS)) + +#include "ace/Condition_Thread_Mutex.h" +#include "ace/Get_Opt.h" +#include "ace/OS_NS_sys_time.h" +#include "ace/Proactor.h" +#include "ace/Task.h" +#include "ace/Thread_Mutex.h" + +#include "Proactor_Test_Backend.h" + +namespace +{ + double + seconds_between (const ACE_Time_Value &start, + const ACE_Time_Value &end) + { + const ACE_Time_Value delta = end - start; + return static_cast (delta.sec ()) + + (static_cast (delta.usec ()) / 1000000.0); + } + + class Proactor_Task : public ACE_Task + { + public: + Proactor_Task () + : proactor_ (0) + { + } + + ~Proactor_Task () + { + this->stop (); + } + + int start (int thread_count, + Proactor_Test_Backend::Type backend, + size_t max_aio_operations) + { + if (this->proactor_ != 0) + return -1; + + if (Proactor_Test_Backend::create_proactor (backend, + 
max_aio_operations, + this->proactor_, + true) != 0) + return -1; + + return this->activate (THR_NEW_LWP | THR_JOINABLE, thread_count); + } + + int stop () + { + ACE_Proactor *proactor = this->proactor_; + if (proactor == 0) + return 0; + + proactor->proactor_end_event_loop (); + this->wait (); +#if defined (ACE_WIN32) + ACE_DEBUG ((LM_DEBUG, + ACE_TEXT ("(%t) Skipping ACE_Proactor::close_singleton() on Windows test shutdown\n"))); +#else + ACE_Proactor::close_singleton (); +#endif + this->proactor_ = 0; + return 0; + } + + virtual int svc (void) + { + return ACE_Proactor::instance ()->proactor_run_event_loop (); + } + + private: + ACE_Proactor *proactor_; + }; + + class Recursive_Dispatch_Handler : public ACE_Handler + { + public: + Recursive_Dispatch_Handler (ACE_Proactor &proactor, + size_t dispatch_scale, + size_t target_count) + : proactor_ (proactor) + , lock_ () + , target_reached_ (lock_) + , idle_ (lock_) + , dispatch_scale_ (dispatch_scale) + , target_count_ (target_count) + , call_count_ (0) + , pending_ (0) + , schedule_failures_ (0) + , target_seen_ (false) + { + } + + int start (size_t initial_events) + { + for (size_t index = 0; index < initial_events; ++index) + { + if (this->schedule_one () != 0) + return -1; + } + return 0; + } + + void stop_dispatching () + { + ACE_GUARD (ACE_Thread_Mutex, guard, this->lock_); + this->dispatch_scale_ = 0; + if (this->pending_ == 0) + this->idle_.broadcast (); + } + + bool wait_for_target (const ACE_Time_Value &max_wait) + { + const ACE_Time_Value deadline = ACE_OS::gettimeofday () + max_wait; + ACE_GUARD_RETURN (ACE_Thread_Mutex, guard, this->lock_, false); + while (!this->target_seen_) + { + if (this->target_reached_.wait (&deadline) == -1) + return this->target_seen_; + } + return true; + } + + bool wait_for_idle (const ACE_Time_Value &max_wait) + { + const ACE_Time_Value deadline = ACE_OS::gettimeofday () + max_wait; + ACE_GUARD_RETURN (ACE_Thread_Mutex, guard, this->lock_, false); + while (this->pending_ != 
0) + { + if (this->idle_.wait (&deadline) == -1) + return this->pending_ == 0; + } + return true; + } + + size_t call_count () const + { + ACE_GUARD_RETURN (ACE_Thread_Mutex, guard, const_cast (this->lock_), 0); + return this->call_count_; + } + + size_t schedule_failures () const + { + ACE_GUARD_RETURN (ACE_Thread_Mutex, guard, const_cast (this->lock_), 0); + return this->schedule_failures_; + } + + virtual void handle_time_out (const ACE_Time_Value &, const void *) + { + size_t schedule_count = 0; + { + ACE_GUARD (ACE_Thread_Mutex, guard, this->lock_); + if (this->pending_ > 0) + --this->pending_; + + ++this->call_count_; + if (!this->target_seen_ && this->call_count_ >= this->target_count_) + { + this->target_seen_ = true; + this->target_reached_.broadcast (); + } + + schedule_count = this->dispatch_scale_; + if (schedule_count == 0 && this->pending_ == 0) + this->idle_.broadcast (); + } + + for (size_t index = 0; index < schedule_count; ++index) + { + if (this->schedule_one () != 0) + break; + } + } + + private: + int schedule_one () + { + ACE_GUARD_RETURN (ACE_Thread_Mutex, guard, this->lock_, -1); + ++this->pending_; + + long timer_id = this->proactor_.schedule_timer (*this, + 0, + ACE_Time_Value::zero); + if (timer_id == -1) + { + --this->pending_; + ++this->schedule_failures_; + return -1; + } + + return 0; + } + + ACE_Proactor &proactor_; + mutable ACE_Thread_Mutex lock_; + ACE_Condition_Thread_Mutex target_reached_; + ACE_Condition_Thread_Mutex idle_; + size_t dispatch_scale_; + size_t target_count_; + size_t call_count_; + size_t pending_; + size_t schedule_failures_; + bool target_seen_; + }; + + int print_usage (ACE_TCHAR *argv0) + { + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("Usage: %s [-t ]\n"), + argv0)); + return Proactor_Test_Backend::print_type_usage (argv0); + } + + int run_recursive_test (Proactor_Test_Backend::Type backend, + bool immediate_shutdown) + { + const ACE_Time_Value start_time = ACE_OS::gettimeofday (); + Proactor_Task task; + if 
(task.start (8, backend, 512) != 0) + { + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("Failed to start proactor task for backend %s\n"), + Proactor_Test_Backend::name (backend)), + -1); + } + + Recursive_Dispatch_Handler handler (*ACE_Proactor::instance (), 2, 100000); + if (handler.start (2) != 0) + { + task.stop (); + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("Failed to seed recursive dispatch for backend %s\n"), + Proactor_Test_Backend::name (backend)), + -1); + } + + if (!handler.wait_for_target (ACE_Time_Value (30))) + { + task.stop (); + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("Timed out waiting for target count on backend %s\n"), + Proactor_Test_Backend::name (backend)), + -1); + } + + handler.stop_dispatching (); + + if (!immediate_shutdown && !handler.wait_for_idle (ACE_Time_Value (10))) + { + task.stop (); + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("Timed out waiting for idle state on backend %s\n"), + Proactor_Test_Backend::name (backend)), + -1); + } + + task.stop (); + + if (handler.call_count () < 100000) + { + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("Expected at least 100000 callbacks for backend %s; got %u\n"), + Proactor_Test_Backend::name (backend), + static_cast (handler.call_count ())), + -1); + } + + if (!immediate_shutdown && handler.schedule_failures () != 0) + { + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("Unexpected timer scheduling failures for backend %s: %u\n"), + Proactor_Test_Backend::name (backend), + static_cast (handler.schedule_failures ())), + -1); + } + + const ACE_Time_Value end_time = ACE_OS::gettimeofday (); + const double elapsed = seconds_between (start_time, end_time); + const double safe_elapsed = elapsed > 0.0 ? 
elapsed : 0.000001; + const double callbacks_per_sec = + static_cast (handler.call_count ()) / safe_elapsed; + + ACE_DEBUG ((LM_INFO, + ACE_TEXT ("PERF_RESULT benchmark=stress backend=%s mode=%s ") + ACE_TEXT ("elapsed_sec=%.6f callbacks=%u callbacks_per_sec=%.3f ") + ACE_TEXT ("schedule_failures=%u\n"), + Proactor_Test_Backend::name (backend), + immediate_shutdown ? ACE_TEXT ("immediate") : ACE_TEXT ("graceful"), + elapsed, + static_cast (handler.call_count ()), + callbacks_per_sec, + static_cast (handler.schedule_failures ()))); + + return 0; + } +} + +int +run_main (int argc, ACE_TCHAR *argv[]) +{ + Proactor_Test_Backend::Type backend = Proactor_Test_Backend::BACKEND_DEFAULT; + ACE_Get_Opt get_opt (argc, argv, ACE_TEXT ("t:u")); + int c = 0; + int parse_failed = 0; + + while ((c = get_opt ()) != EOF) + { + switch (c) + { + case 't': + if (Proactor_Test_Backend::parse_type (get_opt.opt_arg (), backend) == 0) + break; + parse_failed = 1; + break; + case 'u': + default: + parse_failed = 1; + break; + } + + if (parse_failed) + break; + } + + ACE_START_TEST (ACE_TEXT ("Proactor_Stress_Test")); + + if (parse_failed) + { + print_usage (argv[0]); + ACE_END_TEST; + return -1; + } + + int status = 0; + if (run_recursive_test (backend, false) != 0) + status = -1; + else if (run_recursive_test (backend, true) != 0) + status = -1; + + ACE_END_TEST; + return status; +} + +#else + +int +run_main (int, ACE_TCHAR *[]) +{ + ACE_START_TEST (ACE_TEXT ("Proactor_Stress_Test")); + + ACE_DEBUG ((LM_INFO, + ACE_TEXT ("Asynchronous IO is unsupported.\n") + ACE_TEXT ("Proactor_Stress_Test will not be run.\n"))); + + ACE_END_TEST; + return 0; +} + +#endif /* ACE_HAS_THREADS && async io support */ diff --git a/ACE/tests/Proactor_Test.cpp b/ACE/tests/Proactor_Test.cpp index 6424635ae5246..a03a4deec21d4 100644 --- a/ACE/tests/Proactor_Test.cpp +++ b/ACE/tests/Proactor_Test.cpp @@ -42,23 +42,18 @@ #include "ace/Atomic_Op.h" #include "ace/Synch_Traits.h" -#if defined (ACE_WIN32) - -# include 
"ace/WIN32_Proactor.h" - -#elif defined (ACE_HAS_AIO_CALLS) - -# include "ace/POSIX_Proactor.h" -# include "ace/POSIX_CB_Proactor.h" -# include "ace/SUN_Proactor.h" - -#endif /* ACE_WIN32 */ - #include "Proactor_Test.h" +#include "Proactor_Test_Backend.h" // Proactor Type (UNIX only, Win32 ignored) -typedef enum { DEFAULT = 0, AIOCB, SIG, SUN, CB } ProactorType; +typedef Proactor_Test_Backend::Type ProactorType; +static const ProactorType DEFAULT = Proactor_Test_Backend::BACKEND_DEFAULT; +static const ProactorType AIOCB = Proactor_Test_Backend::BACKEND_AIOCB; +static const ProactorType SIG = Proactor_Test_Backend::BACKEND_SIG; +static const ProactorType SUN = Proactor_Test_Backend::BACKEND_SUN; +static const ProactorType CB = Proactor_Test_Backend::BACKEND_CB; +static const ProactorType URING = Proactor_Test_Backend::BACKEND_URING; static ProactorType proactor_type = DEFAULT; // POSIX : > 0 max number aio operations proactor, @@ -202,80 +197,10 @@ MyTask::create_proactor (ProactorType type_proactor, size_t max_op) -1); ACE_TEST_ASSERT (this->proactor_ == 0); - -#if defined (ACE_WIN32) - - ACE_UNUSED_ARG (type_proactor); - ACE_UNUSED_ARG (max_op); - - ACE_WIN32_Proactor *proactor_impl = 0; - - ACE_NEW_RETURN (proactor_impl, - ACE_WIN32_Proactor, - -1); - - ACE_DEBUG ((LM_DEBUG, - ACE_TEXT("(%t) Create Proactor Type = WIN32\n"))); - -#elif defined (ACE_HAS_AIO_CALLS) - - ACE_POSIX_Proactor * proactor_impl = 0; - - switch (type_proactor) - { - case AIOCB: - ACE_NEW_RETURN (proactor_impl, - ACE_POSIX_AIOCB_Proactor (max_op), - -1); - ACE_DEBUG ((LM_DEBUG, - ACE_TEXT ("(%t) Create Proactor Type = AIOCB\n"))); - break; - -#if defined(ACE_HAS_POSIX_REALTIME_SIGNALS) - case SIG: - ACE_NEW_RETURN (proactor_impl, - ACE_POSIX_SIG_Proactor (max_op), - -1); - ACE_DEBUG ((LM_DEBUG, - ACE_TEXT ("(%t) Create Proactor Type = SIG\n"))); - break; -#endif /* ACE_HAS_POSIX_REALTIME_SIGNALS */ - -# if defined (sun) - case SUN: - ACE_NEW_RETURN (proactor_impl, - ACE_SUN_Proactor 
(max_op), - -1); - ACE_DEBUG ((LM_DEBUG, - ACE_TEXT("(%t) Create Proactor Type = SUN\n"))); - break; -# endif /* sun */ - -# if !defined(ACE_HAS_BROKEN_SIGEVENT_STRUCT) - case CB: - ACE_NEW_RETURN (proactor_impl, - ACE_POSIX_CB_Proactor (max_op), - -1); - ACE_DEBUG ((LM_DEBUG, - ACE_TEXT ("(%t) Create Proactor Type = CB\n"))); - break; -# endif /* !ACE_HAS_BROKEN_SIGEVENT_STRUCT */ - - default: - ACE_DEBUG ((LM_DEBUG, - ACE_TEXT ("(%t) Create Proactor Type = DEFAULT\n"))); - break; - } - -#endif /* ACE_WIN32 */ - - // always delete implementation 1 , not !(proactor_impl == 0) - ACE_NEW_RETURN (this->proactor_, - ACE_Proactor (proactor_impl, 1 ), - -1); - // Set new singleton and delete it in close_singleton() - ACE_Proactor::instance (this->proactor_, 1); - return 0; + return Proactor_Test_Backend::create_proactor (type_proactor, + max_op, + this->proactor_, + true); } int @@ -289,7 +214,12 @@ MyTask::delete_proactor (void) ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("(%t) Delete Proactor\n"))); +#if defined (ACE_WIN32) + ACE_DEBUG ((LM_DEBUG, + ACE_TEXT ("(%t) Skipping ACE_Proactor::close_singleton() on Windows test shutdown\n"))); +#else ACE_Proactor::close_singleton (); +#endif this->proactor_ = 0; return 0; @@ -707,7 +637,10 @@ Server::~Server (void) this->tester_->server_done (this); if (this->handle_ != ACE_INVALID_HANDLE) - ACE_OS::closesocket (this->handle_); + { + ACE_OS::shutdown (this->handle_, ACE_SHUTDOWN_WRITE); + ACE_OS::closesocket (this->handle_); + } this->id_ = -1; this->handle_= ACE_INVALID_HANDLE; @@ -1547,7 +1480,8 @@ Client::handle_write_stream (const ACE_Asynch_Write_Stream::Result &result) if (result.error () == ERROR_OPERATION_ABORTED) prio = LM_DEBUG; #else - if (result.error () == ECANCELED) + if (result.error () == ECANCELED || + result.error () == EPIPE) prio = LM_DEBUG; #endif /* ACE_WIN32 */ else @@ -1673,7 +1607,8 @@ Client::handle_read_stream (const ACE_Asynch_Read_Stream::Result &result) if (result.error () == ERROR_OPERATION_ABORTED) prio = 
LM_DEBUG; #else - if (result.error () == ECANCELED) + if (result.error () == ECANCELED || + result.error () == ECONNRESET) prio = LM_DEBUG; #endif /* ACE_WIN32 */ else @@ -1726,6 +1661,7 @@ print_usage (int /* argc */, ACE_TCHAR *argv[]) ACE_TEXT ("\n i SIG") ACE_TEXT ("\n c CB") ACE_TEXT ("\n s SUN") + ACE_TEXT ("\n u URING") ACE_TEXT ("\n d default") ACE_TEXT ("\n-d ") ACE_TEXT ("\n-h for Client mode") @@ -1750,34 +1686,8 @@ print_usage (int /* argc */, ACE_TCHAR *argv[]) static int set_proactor_type (const ACE_TCHAR *ptype) { - if (!ptype) - return 0; - - switch (ACE_OS::ace_toupper (*ptype)) - { - case 'D': - proactor_type = DEFAULT; - return 1; - case 'A': - proactor_type = AIOCB; - return 1; - case 'I': - proactor_type = SIG; - return 1; -#if defined (sun) - case 'S': - proactor_type = SUN; - return 1; -#endif /* sun */ -#if !defined (ACE_HAS_BROKEN_SIGEVENT_STRUCT) - case 'C': - proactor_type = CB; - return 1; -#endif /* !ACE_HAS_BROKEN_SIGEVENT_STRUCT */ - default: - break; - } - return 0; + return Proactor_Test_Backend::parse_type (ptype, proactor_type) == 0 + && Proactor_Test_Backend::is_available (proactor_type) != 0; } static int @@ -1875,11 +1785,16 @@ run_main (int argc, ACE_TCHAR *argv[]) MyTask task1; TestData test; + int started = 0; + int run_status = 0; + Acceptor *acceptor = 0; + Connector *connector = 0; if (task1.start (threads, proactor_type, max_aio_operations) == 0) { - Acceptor acceptor (&test); - Connector connector (&test); + started = 1; + ACE_NEW_RETURN (acceptor, Acceptor (&test), -1); + ACE_NEW_RETURN (connector, Connector (&test), -1); ACE_INET_Addr addr (port); int rc = 0; @@ -1887,7 +1802,7 @@ run_main (int argc, ACE_TCHAR *argv[]) if (both != 0 || host == 0) // Acceptor { // Simplify, initial read with zero size - if (acceptor.open (addr, 0, 1) == 0) + if (acceptor->open (addr, 0, 1) == 0) rc = 1; } @@ -1899,28 +1814,65 @@ run_main (int argc, ACE_TCHAR *argv[]) if (addr.set (port, host, 1, addr.get_type ()) == -1) ACE_ERROR 
((LM_ERROR, ACE_TEXT ("%p\n"), host)); else - rc += connector.start (addr, clients); + rc += connector->start (addr, clients); + } + + if (rc <= 0) + { + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("(%t) No Proactor_Test sessions were started.\n"))); + run_status = -1; + } + else + { + // Let the sessions get going, then wait for them to drain while + // the acceptor and connector are still alive. Destroying them + // earlier leaves callbacks racing with stack lifetime. + ACE_OS::sleep (3); + + ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("(%t) Sleeping til sessions run down.\n"))); + ACE_Time_Value limit = ACE_OS::gettimeofday () + ACE_Time_Value (30); + while (!test.testing_done () && ACE_OS::gettimeofday () < limit) + ACE_OS::sleep (1); + + if (!test.testing_done ()) + { + ACE_ERROR ((LM_ERROR, ACE_TEXT ("(%t) Timed out waiting for sessions to run down.\n"))); + run_status = -1; + } } - // Wait a few seconds to let things get going, then poll til - // all sessions are done. Note that when we exit this scope, the - // Acceptor and Connector will be destroyed, which should prevent - // further connections and also test how well destroyed handlers - // are handled. - ACE_OS::sleep (3); + test.stop_all (); + + if (acceptor != 0) + acceptor->cancel (); + if (connector != 0) + connector->cancel (); + ACE_OS::sleep (1); + } + else + { + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("(%t) Failed to start Proactor_Test task.\n"))); + run_status = -1; } - ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("(%t) Sleeping til sessions run down.\n"))); - while (!test.testing_done ()) - ACE_OS::sleep (1); - test.stop_all (); + if (started) + { + // Let any last cancel completions drain before stopping + // the proactor thread. 
+ ACE_OS::sleep (1); + } ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("(%t) Stop Thread Pool Task\n"))); task1.stop (); + delete connector; + delete acceptor; + ACE_END_TEST; - return 0; + return run_status; } #else diff --git a/ACE/tests/Proactor_Test_Backend.h b/ACE/tests/Proactor_Test_Backend.h new file mode 100644 index 0000000000000..866af20d323d7 --- /dev/null +++ b/ACE/tests/Proactor_Test_Backend.h @@ -0,0 +1,449 @@ +// ============================================================================ +/** + * @file Proactor_Test_Backend.h + * + * Shared backend selection support for ACE Proactor tests. + */ +// ============================================================================ + +#ifndef ACE_TESTS_PROACTOR_TEST_BACKEND_H +#define ACE_TESTS_PROACTOR_TEST_BACKEND_H + +#include "ace/Init_ACE.h" +#include "ace/Log_Msg.h" +#include "test_config.h" + +#if defined (ACE_HAS_WIN32_OVERLAPPED_IO) || defined (ACE_HAS_AIO_CALLS) + +#include "ace/Get_Opt.h" +#include "ace/Proactor.h" +#include "ace/OS_NS_ctype.h" +#include "ace/OS_NS_string.h" + +#if defined (ACE_WIN32) +# include "ace/WIN32_Proactor.h" +#endif /* ACE_WIN32 */ + +#if defined (ACE_HAS_AIO_CALLS) +# include "ace/POSIX_Proactor.h" +# include "ace/POSIX_CB_Proactor.h" +# include "ace/SUN_Proactor.h" +#endif /* ACE_HAS_AIO_CALLS */ + +#if defined (ACE_HAS_AIO_CALLS) && defined (ACE_HAS_IO_URING) +# include "ace/Uring_Proactor.h" +#endif /* ACE_HAS_AIO_CALLS && ACE_HAS_IO_URING */ + +namespace Proactor_Test_Backend +{ + /// Canonical backend identifiers accepted by the shared test parser. + enum Type + { + BACKEND_DEFAULT = 0, + BACKEND_WIN32, + BACKEND_AIOCB, + BACKEND_SIG, + BACKEND_SUN, + BACKEND_CB, + BACKEND_URING + }; + + /// Return the canonical command-line name for a backend. 
+ inline const ACE_TCHAR * + name (Type type) + { + switch (type) + { + case BACKEND_WIN32: + return ACE_TEXT ("win32"); + case BACKEND_AIOCB: + return ACE_TEXT ("aiocb"); + case BACKEND_SIG: + return ACE_TEXT ("sig"); + case BACKEND_SUN: + return ACE_TEXT ("sun"); + case BACKEND_CB: + return ACE_TEXT ("cb"); + case BACKEND_URING: + return ACE_TEXT ("uring"); + case BACKEND_DEFAULT: + default: + return ACE_TEXT ("default"); + } + } + + /// Parse a backend name or alias into its enum value. + inline int + parse_type (const ACE_TCHAR *arg, Type &type) + { + if (arg == 0 || arg[0] == 0) + return -1; + + ACE_TCHAR folded[32]; + size_t index = 0; + for (; index + 1 < sizeof (folded) / sizeof (folded[0]) && arg[index] != 0; ++index) + folded[index] = static_cast (ACE_OS::ace_tolower (arg[index])); + folded[index] = 0; + + if (ACE_OS::strcmp (folded, ACE_TEXT ("d")) == 0 + || ACE_OS::strcmp (folded, ACE_TEXT ("default")) == 0) + { + type = BACKEND_DEFAULT; + return 0; + } + if (ACE_OS::strcmp (folded, ACE_TEXT ("w")) == 0 + || ACE_OS::strcmp (folded, ACE_TEXT ("win32")) == 0) + { + type = BACKEND_WIN32; + return 0; + } + if (ACE_OS::strcmp (folded, ACE_TEXT ("a")) == 0 + || ACE_OS::strcmp (folded, ACE_TEXT ("aiocb")) == 0 + || ACE_OS::strcmp (folded, ACE_TEXT ("posix_aiocb")) == 0) + { + type = BACKEND_AIOCB; + return 0; + } + if (ACE_OS::strcmp (folded, ACE_TEXT ("i")) == 0 + || ACE_OS::strcmp (folded, ACE_TEXT ("sig")) == 0 + || ACE_OS::strcmp (folded, ACE_TEXT ("posix_sig")) == 0) + { + type = BACKEND_SIG; + return 0; + } + if (ACE_OS::strcmp (folded, ACE_TEXT ("s")) == 0 + || ACE_OS::strcmp (folded, ACE_TEXT ("sun")) == 0) + { + type = BACKEND_SUN; + return 0; + } + if (ACE_OS::strcmp (folded, ACE_TEXT ("c")) == 0 + || ACE_OS::strcmp (folded, ACE_TEXT ("cb")) == 0 + || ACE_OS::strcmp (folded, ACE_TEXT ("posix_cb")) == 0) + { + type = BACKEND_CB; + return 0; + } + if (ACE_OS::strcmp (folded, ACE_TEXT ("u")) == 0 + || ACE_OS::strcmp (folded, ACE_TEXT ("uring")) == 0) + 
{ + type = BACKEND_URING; + return 0; + } + + return -1; + } + + /// Return non-zero if the backend is available in the current build. + inline int + is_available (Type type) + { + switch (type) + { + case BACKEND_DEFAULT: + return 1; + + case BACKEND_WIN32: +#if defined (ACE_WIN32) && defined (ACE_HAS_WIN32_OVERLAPPED_IO) + return 1; +#else + return 0; +#endif /* ACE_WIN32 && ACE_HAS_WIN32_OVERLAPPED_IO */ + + case BACKEND_AIOCB: +#if defined (ACE_HAS_AIO_CALLS) + return 1; +#else + return 0; +#endif /* ACE_HAS_AIO_CALLS */ + + case BACKEND_SIG: +#if defined (ACE_HAS_AIO_CALLS) && defined (ACE_HAS_POSIX_REALTIME_SIGNALS) + return 1; +#else + return 0; +#endif /* ACE_HAS_AIO_CALLS && ACE_HAS_POSIX_REALTIME_SIGNALS */ + + case BACKEND_SUN: +#if defined (ACE_HAS_AIO_CALLS) && defined (sun) + return 1; +#else + return 0; +#endif /* ACE_HAS_AIO_CALLS && sun */ + + case BACKEND_CB: +#if defined (ACE_HAS_AIO_CALLS) && !defined (ACE_HAS_BROKEN_SIGEVENT_STRUCT) + return 1; +#else + return 0; +#endif /* ACE_HAS_AIO_CALLS && !ACE_HAS_BROKEN_SIGEVENT_STRUCT */ + + case BACKEND_URING: +#if defined (ACE_HAS_AIO_CALLS) && defined (ACE_HAS_IO_URING) + return 1; +#else + return 0; +#endif /* ACE_HAS_AIO_CALLS && ACE_HAS_IO_URING */ + } + + return 0; + } + + /// Resolve the default selector to the backend chosen by the build. 
+ inline Type + concrete_type (Type type) + { + if (type != BACKEND_DEFAULT) + return type; + +#if defined (ACE_HAS_AIO_CALLS) +# if defined (ACE_POSIX_AIOCB_PROACTOR) + return BACKEND_AIOCB; +# elif defined (ACE_POSIX_SIG_PROACTOR) + return BACKEND_SIG; +# else /* Default order: CB, SIG, AIOCB */ +# if !defined (ACE_HAS_BROKEN_SIGEVENT_STRUCT) + return BACKEND_CB; +# else +# if defined (ACE_HAS_POSIX_REALTIME_SIGNALS) + return BACKEND_SIG; +# else + return BACKEND_AIOCB; +# endif /* ACE_HAS_POSIX_REALTIME_SIGNALS */ +# endif /* !ACE_HAS_BROKEN_SIGEVENT_STRUCT */ +# endif /* ACE_POSIX_AIOCB_PROACTOR */ +#elif defined (ACE_WIN32) && defined (ACE_HAS_WIN32_OVERLAPPED_IO) + return BACKEND_WIN32; +#else + return BACKEND_DEFAULT; +#endif /* ACE_HAS_AIO_CALLS */ + } + + /// Return non-zero if the backend supports scatter/gather test coverage. + inline int + supports_scatter_gather (Type type) + { + switch (concrete_type (type)) + { + case BACKEND_WIN32: +#if defined (ACE_WIN32) && defined (ACE_HAS_WIN32_OVERLAPPED_IO) + return 1; +#else + return 0; +#endif /* ACE_WIN32 && ACE_HAS_WIN32_OVERLAPPED_IO */ + + case BACKEND_URING: +#if defined (ACE_HAS_AIO_CALLS) && defined (ACE_HAS_IO_URING) + return 1; +#else + return 0; +#endif /* ACE_HAS_AIO_CALLS && ACE_HAS_IO_URING */ + + default: + return 0; + } + } + + /// Print the shared backend selection usage text for a test executable. 
+ inline int + print_type_usage (ACE_TCHAR *argv0) + { + const ACE_TCHAR *sun_backend = +#if defined (sun) + ACE_TEXT ("\n s sun") +#else + ACE_TEXT ("") +#endif /* sun */ + ; + + const ACE_TCHAR *uring_backend = +#if defined (ACE_HAS_AIO_CALLS) && defined (ACE_HAS_IO_URING) + ACE_TEXT ("\n u uring") +#else + ACE_TEXT ("") +#endif /* ACE_HAS_AIO_CALLS && ACE_HAS_IO_URING */ + ; + + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("%s") + ACE_TEXT ("\n-t :") + ACE_TEXT ("\n d default") + ACE_TEXT ("\n w win32") + ACE_TEXT ("\n a aiocb / posix_aiocb") + ACE_TEXT ("\n i sig / posix_sig") + ACE_TEXT ("\n c cb / posix_cb") + ACE_TEXT ("%s") + ACE_TEXT ("%s") + ACE_TEXT ("\n"), + argv0, + sun_backend, + uring_backend)); + return -1; + } + + /// Report that the requested backend is unavailable in this build. + inline int + unsupported (Type type) + { + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("Requested proactor backend '%s' is unavailable in this build.\n"), + name (type))); + return -1; + } + + /// Create the requested proactor implementation. 
+ inline int + create_impl (Type type, + size_t max_aio_operations, + ACE_Proactor_Impl *&implementation) + { + implementation = 0; + + switch (type) + { + case BACKEND_DEFAULT: +#if defined (ACE_HAS_AIO_CALLS) +# if defined (ACE_POSIX_AIOCB_PROACTOR) + ACE_NEW_RETURN (implementation, + ACE_POSIX_AIOCB_Proactor (max_aio_operations), + -1); + return 0; +# elif defined (ACE_POSIX_SIG_PROACTOR) + ACE_NEW_RETURN (implementation, + ACE_POSIX_SIG_Proactor (max_aio_operations), + -1); + return 0; +# else /* Default order: CB, SIG, AIOCB */ +# if !defined (ACE_HAS_BROKEN_SIGEVENT_STRUCT) + ACE_NEW_RETURN (implementation, + ACE_POSIX_CB_Proactor (max_aio_operations), + -1); + return 0; +# else +# if defined (ACE_HAS_POSIX_REALTIME_SIGNALS) + ACE_NEW_RETURN (implementation, + ACE_POSIX_SIG_Proactor (max_aio_operations), + -1); + return 0; +# else + ACE_NEW_RETURN (implementation, + ACE_POSIX_AIOCB_Proactor (max_aio_operations), + -1); + return 0; +# endif /* ACE_HAS_POSIX_REALTIME_SIGNALS */ +# endif /* !ACE_HAS_BROKEN_SIGEVENT_STRUCT */ +# endif /* ACE_POSIX_AIOCB_PROACTOR */ +#elif defined (ACE_WIN32) && !defined (ACE_HAS_WINCE) + ACE_NEW_RETURN (implementation, + ACE_WIN32_Proactor, + -1); + return 0; +#else + return unsupported (type); +#endif /* ACE_HAS_AIO_CALLS */ + + case BACKEND_WIN32: +#if defined (ACE_WIN32) && defined (ACE_HAS_WIN32_OVERLAPPED_IO) + ACE_NEW_RETURN (implementation, + ACE_WIN32_Proactor, + -1); + return 0; +#else + return unsupported (type); +#endif /* ACE_WIN32 && ACE_HAS_WIN32_OVERLAPPED_IO */ + + case BACKEND_AIOCB: +#if defined (ACE_HAS_AIO_CALLS) + ACE_NEW_RETURN (implementation, + ACE_POSIX_AIOCB_Proactor (max_aio_operations), + -1); + return 0; +#else + return unsupported (type); +#endif /* ACE_HAS_AIO_CALLS */ + + case BACKEND_SIG: +#if defined (ACE_HAS_AIO_CALLS) && defined (ACE_HAS_POSIX_REALTIME_SIGNALS) + ACE_NEW_RETURN (implementation, + ACE_POSIX_SIG_Proactor (max_aio_operations), + -1); + return 0; +#else + return unsupported (type); 
+#endif /* ACE_HAS_AIO_CALLS && ACE_HAS_POSIX_REALTIME_SIGNALS */ + + case BACKEND_SUN: +#if defined (ACE_HAS_AIO_CALLS) && defined (sun) + ACE_NEW_RETURN (implementation, + ACE_SUN_Proactor (max_aio_operations), + -1); + return 0; +#else + return unsupported (type); +#endif /* ACE_HAS_AIO_CALLS && sun */ + + case BACKEND_CB: +#if defined (ACE_HAS_AIO_CALLS) && !defined (ACE_HAS_BROKEN_SIGEVENT_STRUCT) + ACE_NEW_RETURN (implementation, + ACE_POSIX_CB_Proactor (max_aio_operations), + -1); + return 0; +#else + return unsupported (type); +#endif /* ACE_HAS_AIO_CALLS && !ACE_HAS_BROKEN_SIGEVENT_STRUCT */ + + case BACKEND_URING: +#if defined (ACE_HAS_AIO_CALLS) && defined (ACE_HAS_IO_URING) + { + ACE_Uring_Proactor *uring = 0; + ACE_NEW_RETURN (uring, + ACE_Uring_Proactor (max_aio_operations), + -1); + if (!uring->is_initialized ()) + { + delete uring; + errno = ENODEV; + ACE_ERROR_RETURN ((LM_ERROR, + ACE_TEXT ("Failed to initialize uring proactor\n")), + -1); + } + implementation = uring; + } + return 0; +#else + return unsupported (type); +#endif /* ACE_HAS_AIO_CALLS && ACE_HAS_IO_URING */ + } + + return -1; + } + + /// Create an ACE_Proactor wrapper for the requested backend implementation. 
+ inline int + create_proactor (Type type, + size_t max_aio_operations, + ACE_Proactor *&proactor, + bool install_singleton = true) + { + ACE_Proactor_Impl *implementation = 0; + if (create_impl (type, max_aio_operations, implementation) != 0) + return -1; + + ACE_NEW_NORETURN (proactor, + ACE_Proactor (implementation, 1)); + if (proactor == 0) + { + delete implementation; + return -1; + } + + if (install_singleton) + ACE_Proactor::instance (proactor, 1); + + return 0; + } +} + +#endif /* ACE_HAS_WIN32_OVERLAPPED_IO || ACE_HAS_AIO_CALLS */ + +#endif /* ACE_TESTS_PROACTOR_TEST_BACKEND_H */ diff --git a/ACE/tests/Proactor_Test_IPV6.cpp b/ACE/tests/Proactor_Test_IPV6.cpp index 341c604c30d97..67cd35dc97df3 100644 --- a/ACE/tests/Proactor_Test_IPV6.cpp +++ b/ACE/tests/Proactor_Test_IPV6.cpp @@ -56,10 +56,17 @@ #endif /* defined (ACE_HAS_WIN32_OVERLAPPED_IO) */ #include "Proactor_Test.h" +#include "Proactor_Test_Backend.h" // Proactor Type (UNIX only, Win32 ignored) -typedef enum { DEFAULT = 0, AIOCB, SIG, SUN, CB } ProactorType; +typedef Proactor_Test_Backend::Type ProactorType; +static const ProactorType DEFAULT = Proactor_Test_Backend::BACKEND_DEFAULT; +static const ProactorType AIOCB = Proactor_Test_Backend::BACKEND_AIOCB; +static const ProactorType SIG = Proactor_Test_Backend::BACKEND_SIG; +static const ProactorType SUN = Proactor_Test_Backend::BACKEND_SUN; +static const ProactorType CB = Proactor_Test_Backend::BACKEND_CB; +static const ProactorType URING = Proactor_Test_Backend::BACKEND_URING; static ProactorType proactor_type = DEFAULT; // POSIX : > 0 max number aio operations proactor, @@ -203,80 +210,10 @@ MyTask::create_proactor (ProactorType type_proactor, size_t max_op) -1); ACE_TEST_ASSERT (this->proactor_ == 0); - -#if defined (ACE_WIN32) && !defined (ACE_HAS_WINCE) - - ACE_UNUSED_ARG (type_proactor); - ACE_UNUSED_ARG (max_op); - - ACE_WIN32_Proactor *proactor_impl = 0; - - ACE_NEW_RETURN (proactor_impl, - ACE_WIN32_Proactor, - -1); - - ACE_DEBUG 
((LM_DEBUG, - ACE_TEXT("(%t) Create Proactor Type = WIN32\n"))); - -#elif defined (ACE_HAS_AIO_CALLS) - - ACE_POSIX_Proactor * proactor_impl = 0; - - switch (type_proactor) - { - case AIOCB: - ACE_NEW_RETURN (proactor_impl, - ACE_POSIX_AIOCB_Proactor (max_op), - -1); - ACE_DEBUG ((LM_DEBUG, - ACE_TEXT ("(%t) Create Proactor Type = AIOCB\n"))); - break; - -#if defined(ACE_HAS_POSIX_REALTIME_SIGNALS) - case SIG: - ACE_NEW_RETURN (proactor_impl, - ACE_POSIX_SIG_Proactor (max_op), - -1); - ACE_DEBUG ((LM_DEBUG, - ACE_TEXT ("(%t) Create Proactor Type = SIG\n"))); - break; -#endif /* ACE_HAS_POSIX_REALTIME_SIGNALS */ - -# if defined (sun) - case SUN: - ACE_NEW_RETURN (proactor_impl, - ACE_SUN_Proactor (max_op), - -1); - ACE_DEBUG ((LM_DEBUG, - ACE_TEXT("(%t) Create Proactor Type = SUN\n"))); - break; -# endif /* sun */ - -# if !defined(ACE_HAS_BROKEN_SIGEVENT_STRUCT) - case CB: - ACE_NEW_RETURN (proactor_impl, - ACE_POSIX_CB_Proactor (max_op), - -1); - ACE_DEBUG ((LM_DEBUG, - ACE_TEXT ("(%t) Create Proactor Type = CB\n"))); - break; -# endif /* !ACE_HAS_BROKEN_SIGEVENT_STRUCT */ - - default: - ACE_DEBUG ((LM_DEBUG, - ACE_TEXT ("(%t) Create Proactor Type = DEFAULT\n"))); - break; - } - -#endif // (ACE_WIN32) && !defined (ACE_HAS_WINCE) - - // always delete implementation 1 , not !(proactor_impl == 0) - ACE_NEW_RETURN (this->proactor_, - ACE_Proactor (proactor_impl, 1 ), - -1); - // Set new singleton and delete it in close_singleton() - ACE_Proactor::instance (this->proactor_, 1); - return 0; + return Proactor_Test_Backend::create_proactor (type_proactor, + max_op, + this->proactor_, + true); } int @@ -708,7 +645,10 @@ Server::~Server (void) this->tester_->server_done (this); if (this->handle_ != ACE_INVALID_HANDLE) - ACE_OS::closesocket (this->handle_); + { + ACE_OS::shutdown (this->handle_, ACE_SHUTDOWN_WRITE); + ACE_OS::closesocket (this->handle_); + } this->id_ = -1; this->handle_= ACE_INVALID_HANDLE; @@ -1587,7 +1527,8 @@ Client::handle_write_stream (const 
ACE_Asynch_Write_Stream::Result &result) if (result.error () == ERROR_OPERATION_ABORTED) prio = LM_DEBUG; #else - if (result.error () == ECANCELED) + if (result.error () == ECANCELED || + result.error () == EPIPE) prio = LM_DEBUG; #endif /* ACE_WIN32 */ else @@ -1713,7 +1654,8 @@ Client::handle_read_stream (const ACE_Asynch_Read_Stream::Result &result) if (result.error () == ERROR_OPERATION_ABORTED) prio = LM_DEBUG; #else - if (result.error () == ECANCELED) + if (result.error () == ECANCELED || + result.error () == ECONNRESET) prio = LM_DEBUG; #endif /* ACE_WIN32 */ else @@ -1790,34 +1732,8 @@ print_usage (int /* argc */, ACE_TCHAR *argv[]) static int set_proactor_type (const ACE_TCHAR *ptype) { - if (!ptype) - return 0; - - switch (ACE_OS::ace_toupper (*ptype)) - { - case 'D': - proactor_type = DEFAULT; - return 1; - case 'A': - proactor_type = AIOCB; - return 1; - case 'I': - proactor_type = SIG; - return 1; -#if defined (sun) - case 'S': - proactor_type = SUN; - return 1; -#endif /* sun */ -#if !defined (ACE_HAS_BROKEN_SIGEVENT_STRUCT) - case 'C': - proactor_type = CB; - return 1; -#endif /* !ACE_HAS_BROKEN_SIGEVENT_STRUCT */ - default: - break; - } - return 0; + return Proactor_Test_Backend::parse_type (ptype, proactor_type) == 0 + && Proactor_Test_Backend::is_available (proactor_type) != 0; } static int @@ -1914,17 +1830,42 @@ run_main (int argc, ACE_TCHAR *argv[]) if (::parse_args (argc, argv) == -1) return -1; -#if defined (ACE_HAS_IPV6) + int run_status = 0; + +#if !defined (ACE_HAS_IPV6) + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("IPv6 is not supported by ACE on this platform.\n") + ACE_TEXT ("Proactor_Test_IPV6 requires ACE IPv6 support.\n"))); + + ACE_END_TEST; + + return 1; +#else + if (!ACE::ipv6_enabled ()) + { + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("IPv6 is not supported by ACE on this platform.\n") + ACE_TEXT ("Proactor_Test_IPV6 requires ACE IPv6 support.\n"))); + + ACE_END_TEST; + + return 1; + } + disable_signal (ACE_SIGRTMIN, ACE_SIGRTMAX); disable_signal 
(SIGPIPE, SIGPIPE); MyTask task1; TestData test; + Acceptor *acceptor = 0; + Connector *connector = 0; + int started = 0; if (task1.start (threads, proactor_type, max_aio_operations) == 0) { - Acceptor acceptor (&test); - Connector connector (&test); + started = 1; + ACE_NEW_RETURN (acceptor, Acceptor (&test), -1); + ACE_NEW_RETURN (connector, Connector (&test), -1); ACE_INET_Addr addr (port, "::"); int rc = 0; @@ -1932,7 +1873,7 @@ run_main (int argc, ACE_TCHAR *argv[]) if (both != 0 || host == 0) // Acceptor { // Simplify, initial read with zero size - if (acceptor.open (addr, 0, 1) == 0) + if (acceptor->open (addr, 0, 1) == 0) rc = 1; } @@ -1944,30 +1885,67 @@ run_main (int argc, ACE_TCHAR *argv[]) if (addr.set (port, host, 1, addr.get_type ()) == -1) ACE_ERROR ((LM_ERROR, ACE_TEXT ("%p\n"), host)); else - rc += connector.start (addr, clients); + rc += connector->start (addr, clients); } - // Wait a few seconds to let things get going, then poll til - // all sessions are done. Note that when we exit this scope, the - // Acceptor and Connector will be destroyed, which should prevent - // further connections and also test how well destroyed handlers - // are handled. - ACE_OS::sleep (3); + if (rc <= 0) + { + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("(%t) No Proactor_Test_IPV6 sessions were started.\n"))); + run_status = -1; + } + else + { + // Let the sessions get going, then wait for them to drain while + // the acceptor and connector are still alive. Destroying them + // earlier leaves callbacks racing with stack lifetime. 
+ ACE_OS::sleep (3); + + ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("(%t) Sleeping til sessions run down.\n"))); + ACE_Time_Value limit = ACE_OS::gettimeofday () + ACE_Time_Value (30); + while (!test.testing_done () && ACE_OS::gettimeofday () < limit) + ACE_OS::sleep (1); + + if (!test.testing_done ()) + { + ACE_ERROR ((LM_ERROR, ACE_TEXT ("(%t) Timed out waiting for sessions to run down.\n"))); + run_status = -1; + } + } + + test.stop_all (); + + if (acceptor != 0) + acceptor->cancel (); + if (connector != 0) + connector->cancel (); + ACE_OS::sleep (1); + } + else + { + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("(%t) Failed to start Proactor_Test_IPV6 task.\n"))); + run_status = -1; } - ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("(%t) Sleeping til sessions run down.\n"))); - while (!test.testing_done ()) - ACE_OS::sleep (1); - test.stop_all (); + if (started) + { + // Let canceled accept/connect completions drain before stopping + // the proactor thread. + ACE_OS::sleep (1); + } ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("(%t) Stop Thread Pool Task\n"))); task1.stop (); -#endif /* ACE_HAS_IPV6 */ + delete connector; + delete acceptor; + +#endif /* !ACE_HAS_IPV6 */ ACE_END_TEST; - return 0; + return run_status; } #else diff --git a/ACE/tests/Proactor_Timer_Test.cpp b/ACE/tests/Proactor_Timer_Test.cpp index e13b01b9ff476..5fa290546f86f 100644 --- a/ACE/tests/Proactor_Timer_Test.cpp +++ b/ACE/tests/Proactor_Timer_Test.cpp @@ -13,20 +13,21 @@ */ //============================================================================= - #include "test_config.h" #include "ace/Trace.h" #if defined (ACE_HAS_WIN32_OVERLAPPED_IO) || defined (ACE_HAS_AIO_CALLS) // This only works on Win32 platforms and on Unix platforms - // supporting POSIX aio calls. + // supporting POSIX aio calls or io_uring. 
-#include "ace/OS_NS_unistd.h" +#include "ace/OS_NS_sys_time.h" +#include "ace/OS_NS_string.h" #include "ace/Proactor.h" #include "ace/High_Res_Timer.h" #include "ace/Asynch_IO.h" #include "ace/Timer_Heap.h" #include "ace/Auto_Ptr.h" +#include "Proactor_Test_Backend.h" static int done = 0; static size_t counter = 0; @@ -54,31 +55,34 @@ class Time_Handler : public ACE_Handler /* * Need a variant of this that will track if a repeating timer is working - * correctly. This class should be scheduled with a repeating timer that - * repeats on a specified number of seconds. This class will let two - * expirations happen then wait in handle_time_out() longer than the repeat - * time to cause at least one timer expiration to be queued up while we're - * waiting; then cancel the timer. + * correctly. This class is scheduled with a repeating timer and cancels + * itself after two expirations. The test then continues dispatching events + * long enough to verify that canceling the repeat prevents future + * expirations. */ class Repeat_Timer_Handler : public ACE_Handler { public: static const int REPEAT_INTERVAL = 2; - // Constructor arg tells how many seconds we intend to do the repeat with. - // The internals will use this to tell how long to wait in order to cause - // a timer expiration to be missed and queued up. - Repeat_Timer_Handler (const int repeat_time = REPEAT_INTERVAL) - : repeat_secs_ (repeat_time), expirations_ (0) {}; - - ~Repeat_Timer_Handler (); + Repeat_Timer_Handler (void) + : expirations_ (0) + , cancel_result_ (0) + , failed_ (false) + { + } // Handle the timeout. 
virtual void handle_time_out (const ACE_Time_Value &tv, const void *arg); + int expirations (void) const; + bool failed (void) const; + int cancel_result (void) const; + private: - int repeat_secs_; int expirations_; + int cancel_result_; + bool failed_; }; @@ -129,16 +133,6 @@ Time_Handler::timer_id (long t) this->timer_id_ = t; } -Repeat_Timer_Handler::~Repeat_Timer_Handler () -{ - if (this->expirations_ == 2) - ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("Repeater expired twice; correct\n"))); - else - ACE_ERROR ((LM_ERROR, - ACE_TEXT ("Repeater expired %d times; should be 2\n"), - this->expirations_)); -} - void Repeat_Timer_Handler::handle_time_out (const ACE_Time_Value &, const void *) { @@ -148,18 +142,18 @@ Repeat_Timer_Handler::handle_time_out (const ACE_Time_Value &, const void *) if (this->expirations_ == 2) { - ACE_OS::sleep (this->repeat_secs_ + 1); - int canceled = this->proactor ()->cancel_timer (*this); - if (canceled != 1) + this->cancel_result_ = this->proactor ()->cancel_timer (*this); + if (this->cancel_result_ != 1) { + this->failed_ = true; ACE_ERROR ((LM_ERROR, ACE_TEXT ("Repeater cancel timer: %d; should be 1\n"), - canceled)); + this->cancel_result_)); } - delete this; } else { + this->failed_ = true; ACE_ERROR ((LM_ERROR, ACE_TEXT ("Repeater expiration #%d; should get only 2\n"), this->expirations_)); @@ -167,6 +161,24 @@ Repeat_Timer_Handler::handle_time_out (const ACE_Time_Value &, const void *) return; } +int +Repeat_Timer_Handler::expirations (void) const +{ + return this->expirations_; +} + +bool +Repeat_Timer_Handler::failed (void) const +{ + return this->failed_; +} + +int +Repeat_Timer_Handler::cancel_result (void) const +{ + return this->cancel_result_; +} + static void test_registering_all_handlers (void) { @@ -258,39 +270,103 @@ test_canceling_odd_timers (void) ACE_Proactor::instance ()->handle_events (); } -static void +static int test_cancel_repeat_timer (void) { - Repeat_Timer_Handler *handler = new Repeat_Timer_Handler; + 
Repeat_Timer_Handler handler; ACE_Time_Value timeout (Repeat_Timer_Handler::REPEAT_INTERVAL); long t_id = ACE_Proactor::instance ()->schedule_repeating_timer - (*handler, 0, timeout); + (handler, 0, timeout); if (t_id == -1) { ACE_ERROR ((LM_ERROR, ACE_TEXT ("%p\n"), ACE_TEXT ("schedule_repeating_timer"))); - delete handler; - return; + return -1; + } + + ACE_Time_Value deadline = ACE_OS::gettimeofday () + + ACE_Time_Value (3 * Repeat_Timer_Handler::REPEAT_INTERVAL); + while (handler.expirations () < 2 && ACE_OS::gettimeofday () < deadline) + { + ACE_Time_Value wait_time (0, 10000); + if (ACE_Proactor::instance ()->handle_events (wait_time) == -1) + { + ACE_ERROR ((LM_ERROR, ACE_TEXT ("%p\n"), ACE_TEXT ("handle_events"))); + ACE_Proactor::instance ()->cancel_timer (handler); + return -1; + } } - ACE_Time_Value test_timer (4 * Repeat_Timer_Handler::REPEAT_INTERVAL); - if (-1 == ACE_Proactor::instance ()->proactor_run_event_loop (test_timer)) - ACE_ERROR ((LM_ERROR, ACE_TEXT ("%p\n"), ACE_TEXT ("proactor loop fail"))); + if (handler.expirations () != 2 || handler.cancel_result () != 1) + { + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("Repeater expired %d times, cancel result %d; ") + ACE_TEXT ("expected 2 expirations and cancel result 1\n"), + handler.expirations (), + handler.cancel_result ())); + ACE_Proactor::instance ()->cancel_timer (handler); + return -1; + } - // handler should be deleted by its own handle_time_out(). 
- return; + deadline = ACE_OS::gettimeofday () + + ACE_Time_Value (Repeat_Timer_Handler::REPEAT_INTERVAL + 1); + while (!handler.failed () && ACE_OS::gettimeofday () < deadline) + { + ACE_Time_Value wait_time (0, 10000); + if (ACE_Proactor::instance ()->handle_events (wait_time) == -1) + { + ACE_ERROR ((LM_ERROR, ACE_TEXT ("%p\n"), ACE_TEXT ("handle_events"))); + return -1; + } + } + + if (handler.failed () || handler.expirations () != 2) + return -1; + + ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("Repeater expired twice; correct\n"))); + return 0; } -// If any command line arg is given, run the test with high res timer -// queue. Else run it normally. +// If any command line arg is given (other than -t u), run the test with +// high res timer queue. Else run it normally. int -run_main (int argc, ACE_TCHAR *[]) +run_main (int argc, ACE_TCHAR *argv[]) { + ACE_Proactor *proactor = 0; + Proactor_Test_Backend::Type backend = Proactor_Test_Backend::BACKEND_DEFAULT; + ACE_START_TEST (ACE_TEXT ("Proactor_Timer_Test")); - if (argc > 1) + // Determine whether to run with high-res timer queue. Ignore the + // backend selection arguments so they don't accidentally trigger + // this branch. + bool use_hires = false; + for (int i = 1; i < argc; ++i) + { + if (ACE_OS::strcmp (argv[i], ACE_TEXT ("-t")) == 0) + { + if (i + 1 >= argc + || Proactor_Test_Backend::parse_type (argv[i + 1], backend) != 0) + { + Proactor_Test_Backend::print_type_usage (argv[0]); + ACE_END_TEST; + return -1; + } + ++i; // skip the type argument + continue; + } + use_hires = true; + } + + if (Proactor_Test_Backend::create_proactor (backend, 128, proactor, true) != 0) + { + ACE_END_TEST; + return -1; + } + + if (use_hires) { ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("Running with high-res timer queue\n"))); @@ -310,7 +386,7 @@ run_main (int argc, ACE_TCHAR *[]) // code ... typedef ACE_Timer_Heap_T Timer_Queue; - auto_ptr tq(new Timer_Queue); + ACE_Auto_Ptr tq(new Timer_Queue); // ... 
notice how the policy is in the derived timer queue type. // The abstract timer queue does not have a time policy ... tq->set_time_policy(&ACE_High_Res_Timer::gettimeofday_hr); @@ -333,10 +409,18 @@ run_main (int argc, ACE_TCHAR *[]) // Try canceling handlers with odd numbered timer ids. test_canceling_odd_timers (); - test_cancel_repeat_timer (); + int status = 0; + if (test_cancel_repeat_timer () != 0) + status = -1; +#if defined (ACE_WIN32) + ACE_DEBUG ((LM_DEBUG, + ACE_TEXT ("(%t) Skipping ACE_Proactor::close_singleton() on Windows test shutdown\n"))); +#else + ACE_Proactor::close_singleton (); +#endif ACE_END_TEST; - return 0; + return status; } #else diff --git a/ACE/tests/Proactor_UDP_Test.cpp b/ACE/tests/Proactor_UDP_Test.cpp index bb0ea4863cb7a..e3df6597b1869 100644 --- a/ACE/tests/Proactor_UDP_Test.cpp +++ b/ACE/tests/Proactor_UDP_Test.cpp @@ -51,8 +51,16 @@ #endif /* ACE_WIN32 */ +#include "Proactor_Test_Backend.h" + // Proactor Type (UNIX only, Win32 ignored) -typedef enum { DEFAULT = 0, AIOCB, SIG, SUN, CB } ProactorType; +typedef Proactor_Test_Backend::Type ProactorType; +static const ProactorType DEFAULT = Proactor_Test_Backend::BACKEND_DEFAULT; +static const ProactorType AIOCB = Proactor_Test_Backend::BACKEND_AIOCB; +static const ProactorType SIG = Proactor_Test_Backend::BACKEND_SIG; +static const ProactorType SUN = Proactor_Test_Backend::BACKEND_SUN; +static const ProactorType CB = Proactor_Test_Backend::BACKEND_CB; +static const ProactorType URING = Proactor_Test_Backend::BACKEND_URING; static ProactorType proactor_type = DEFAULT; // POSIX : > 0 max number aio operations proactor, @@ -85,6 +93,8 @@ static int loglevel; // 0 full , 1 only errors static size_t xfer_limit; // Number of bytes for Client to send. +static unsigned long session_drain_timeout = 120; // Seconds. 
+ static char complete_message[] = "GET / HTTP/1.1\r\n" "Accept: */*\r\n" @@ -196,80 +206,10 @@ MyTask::create_proactor (ProactorType type_proactor, size_t max_op) -1); ACE_TEST_ASSERT (this->proactor_ == 0); - -#if defined (ACE_WIN32) - - ACE_UNUSED_ARG (type_proactor); - ACE_UNUSED_ARG (max_op); - - ACE_WIN32_Proactor *proactor_impl = 0; - - ACE_NEW_RETURN (proactor_impl, - ACE_WIN32_Proactor, - -1); - - ACE_DEBUG ((LM_DEBUG, - ACE_TEXT("(%t) Create Proactor Type = WIN32\n"))); - -#elif defined (ACE_HAS_AIO_CALLS) - - ACE_POSIX_Proactor * proactor_impl = 0; - - switch (type_proactor) - { - case AIOCB: - ACE_NEW_RETURN (proactor_impl, - ACE_POSIX_AIOCB_Proactor (max_op), - -1); - ACE_DEBUG ((LM_DEBUG, - ACE_TEXT ("(%t) Create Proactor Type = AIOCB\n"))); - break; - -#if defined(ACE_HAS_POSIX_REALTIME_SIGNALS) - case SIG: - ACE_NEW_RETURN (proactor_impl, - ACE_POSIX_SIG_Proactor (max_op), - -1); - ACE_DEBUG ((LM_DEBUG, - ACE_TEXT ("(%t) Create Proactor Type = SIG\n"))); - break; -#endif /* ACE_HAS_POSIX_REALTIME_SIGNALS */ - -# if defined (sun) - case SUN: - ACE_NEW_RETURN (proactor_impl, - ACE_SUN_Proactor (max_op), - -1); - ACE_DEBUG ((LM_DEBUG, - ACE_TEXT("(%t) Create Proactor Type = SUN\n"))); - break; -# endif /* sun */ - -# if !defined(ACE_HAS_BROKEN_SIGEVENT_STRUCT) - case CB: - ACE_NEW_RETURN (proactor_impl, - ACE_POSIX_CB_Proactor (max_op), - -1); - ACE_DEBUG ((LM_DEBUG, - ACE_TEXT ("(%t) Create Proactor Type = CB\n"))); - break; -# endif /* !ACE_HAS_BROKEN_SIGEVENT_STRUCT */ - - default: - ACE_DEBUG ((LM_DEBUG, - ACE_TEXT ("(%t) Create Proactor Type = DEFAULT\n"))); - break; - } - -#endif /* ACE_WIN32 */ - - // always delete implementation 1 , not !(proactor_impl == 0) - ACE_NEW_RETURN (this->proactor_, - ACE_Proactor (proactor_impl, 1 ), - -1); - // Set new singleton and delete it in close_singleton() - ACE_Proactor::instance (this->proactor_, 1); - return 0; + return Proactor_Test_Backend::create_proactor (type_proactor, + max_op, + this->proactor_, + 
true); } int @@ -283,7 +223,12 @@ MyTask::delete_proactor (void) ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("(%t) Delete Proactor\n"))); +#if defined (ACE_WIN32) + ACE_DEBUG ((LM_DEBUG, + ACE_TEXT ("(%t) Skipping ACE_Proactor::close_singleton() on Windows test shutdown\n"))); +#else ACE_Proactor::close_singleton (); +#endif this->proactor_ = 0; return 0; @@ -483,6 +428,7 @@ class TestData public: TestData (); bool testing_done (void); + void log_session_counts (void); Server *server_up (void); Client *client_up (void); void server_done (Server *s); @@ -534,6 +480,18 @@ TestData::testing_done (void) return (svr_dn >= svr_up && clt_dn >= clt_up); } +void +TestData::log_session_counts (void) +{ + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("Session progress: ") + ACE_TEXT ("servers up=%d down=%d, clients up=%d down=%d\n"), + this->servers_.sessions_up_.value (), + this->servers_.sessions_down_.value (), + this->clients_.sessions_up_.value (), + this->clients_.sessions_down_.value ())); +} + Server * TestData::server_up (void) { @@ -754,178 +712,170 @@ struct Session_Data // clear it!). So, this bit of messiness is necessary for portability. // When the Master is destroyed, it will try to stop establishing sessions // but this will only work on Windows. -class Master : public ACE_Handler +class Master : public ACE_Task { public: Master (TestData *tester, const ACE_INET_Addr &recv_addr, int expected); ~Master (void); - // Called when dgram receive operation completes. 
- virtual void handle_read_dgram (const ACE_Asynch_Read_Dgram::Result &result); + void shutdown (void); + virtual int svc (void); private: - void start_recv (void); + void handle_session (const Session_Data &session); TestData *tester_; ACE_INET_Addr recv_addr_; ACE_SOCK_Dgram sock_; - ACE_Asynch_Read_Dgram rd_; - ACE_Message_Block *mb_; ACE_Atomic_Op sessions_expected_; - volatile bool recv_in_progress_; + ACE_Atomic_Op shutting_down_; + int thread_started_; }; // ************************************************************* Master::Master (TestData *tester, const ACE_INET_Addr &recv_addr, int expected) : tester_ (tester), recv_addr_ (recv_addr), - mb_ (0), sessions_expected_ (expected), - recv_in_progress_ (false) + shutting_down_ (0), + thread_started_ (0) { if (this->sock_.open (recv_addr) == -1) ACE_ERROR ((LM_ERROR, ACE_TEXT ("Master socket %p\n"), ACE_TEXT ("open"))); - else + else if (this->activate (THR_NEW_LWP, 1) == -1) { - if (this->rd_.open (*this, this->sock_.get_handle ()) == -1) - ACE_ERROR ((LM_ERROR, - ACE_TEXT ("Master reader %p\n"), - ACE_TEXT ("open"))); - this->mb_ = new ACE_Message_Block (sizeof (Session_Data)); - start_recv (); + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("Master activate %p\n"), + ACE_TEXT ("activate"))); + this->sock_.close (); } + else + this->thread_started_ = 1; } Master::~Master (void) { - if (this->recv_in_progress_) - this->rd_.cancel (); + this->shutdown (); + + if (this->thread_started_ != 0 && this->wait () == -1) + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("Master wait %p\n"), + ACE_TEXT ("wait"))); + this->sock_.close (); +} + +void +Master::shutdown (void) +{ + this->shutting_down_ = 1; +} + +int +Master::svc (void) +{ + ACE_Time_Value timeout (1); - if (this->mb_ != 0) + while (this->shutting_down_.value () == 0 + && this->sessions_expected_.value () > 0) { - this->mb_->release (); - this->mb_ = 0; + Session_Data session; + ACE_INET_Addr remote_addr; + ssize_t received = this->sock_.recv (&session, + sizeof (session), + 
remote_addr, + 0, + &timeout); + + if (received == -1) + { + if (ACE_OS::last_error () == ETIME) + continue; + + ACE_ERROR ((LM_ERROR, ACE_TEXT ("(%t) Master %p\n"), ACE_TEXT ("recv"))); + continue; + } + + if (static_cast (received) != sizeof (Session_Data)) + { + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("(%t) Master session data expected %B bytes; ") + ACE_TEXT ("received %b\n"), + sizeof (Session_Data), + received)); + continue; + } + + this->handle_session (session); } + + return 0; } void -Master::handle_read_dgram (const ACE_Asynch_Read_Dgram::Result &result) +Master::handle_session (const Session_Data &session) { - // We should only receive Start datagrams with valid addresses to reply to. - if (result.success ()) + if (session.direction_ == 0) { - if (result.bytes_transferred () != sizeof (Session_Data)) - ACE_ERROR ((LM_ERROR, - ACE_TEXT ("(%t) Master session data expected %B bytes; ") - ACE_TEXT ("received %B\n"), - sizeof (Session_Data), - result.bytes_transferred ())); + ACE_INET_Addr client_addr, me_addr; + ACE_TCHAR client_str[80], me_str[80]; + client_addr.set ((u_short)session.port_, session.addr_, 0); + client_addr.addr_to_string (client_str, 80); + + // Set up the local and remote addresses. This is the socket that + // the session will run over. The addressing info to be sent back to + // the Client goes over the well-known receive socket to ensure the + // reply is sent to the client that initiated the session. 
+ ACE_SOCK_CODgram sock; + if (sock.open (client_addr) == -1) + { + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("(%t) Master new socket for ") + ACE_TEXT ("client %s: %p\n"), + client_str, + ACE_TEXT ("open"))); + } else { - ACE_Message_Block *mb = result.message_block (); - Session_Data *session = - reinterpret_cast(mb->rd_ptr ()); - if (session->direction_ == 0) + sock.get_local_addr (me_addr); + me_addr.addr_to_string (me_str, 80); + ACE_DEBUG ((LM_DEBUG, + ACE_TEXT ("(%t) Master setting up server for ") + ACE_TEXT ("local %s, peer %s\n"), + me_str, + client_str)); + + Session_Data ack; + ack.direction_ = 1; // Ack + ack.addr_ = ACE_HTONL (me_addr.get_ip_address ()); + ack.port_ = ACE_HTONS (me_addr.get_port_number ()); + if (this->sock_.send (&ack, + sizeof (ack), + client_addr) == -1) { - ACE_INET_Addr client_addr, me_addr; - ACE_TCHAR client_str[80], me_str[80]; - client_addr.set ((u_short)session->port_, session->addr_, 0); - client_addr.addr_to_string (client_str, 80); - - // Set up the local and remote addresses - need fully-specified - // addresses to use UDP aio on Linux. This is the socket that - // the session will run over. The addressing info to be sent - // back to the Client will be sent over the receive socket - // to ensure it goes back to the client initiating the session. 
- ACE_SOCK_CODgram sock; - if (sock.open (client_addr) == -1) - { - ACE_ERROR ((LM_ERROR, - ACE_TEXT ("(%t) Master new socket for ") - ACE_TEXT ("client %s: %p\n"), - client_str, - ACE_TEXT ("open"))); - } - else - { - sock.get_local_addr (me_addr); - me_addr.addr_to_string (me_str, 80); - ACE_DEBUG ((LM_DEBUG, - ACE_TEXT ("(%t) Master setting up server for ") - ACE_TEXT ("local %s, peer %s\n"), - me_str, - client_str)); - - Session_Data session; - session.direction_ = 1; // Ack - session.addr_ = ACE_HTONL (me_addr.get_ip_address ()); - session.port_ = ACE_HTONS (me_addr.get_port_number ()); - if (this->sock_.send (&session, - sizeof (session), - client_addr) == -1) - { - ACE_ERROR ((LM_ERROR, - ACE_TEXT ("(%t) Master reply %p\n"), - ACE_TEXT ("send"))); - sock.close (); - } - else - { - Server *server = this->tester_->server_up (); - server->go (sock.get_handle (), client_addr); - } - } - if (--this->sessions_expected_ == 0) - { - ACE_DEBUG ((LM_DEBUG, - ACE_TEXT ("All expected sessions are up\n"))); - } + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("(%t) Master reply %p\n"), + ACE_TEXT ("send"))); + sock.close (); } else { - ACE_ERROR ((LM_ERROR, - ACE_TEXT ("(%t) Badly formed Session request\n"))); + Server *server = this->tester_->server_up (); + server->go (sock.get_handle (), client_addr); } } + + if (--this->sessions_expected_ == 0) + { + ACE_DEBUG ((LM_DEBUG, + ACE_TEXT ("All expected sessions are up\n"))); + } } else { - ACE_Log_Priority prio = LM_ERROR; -#if defined (ACE_WIN32) - if (result.error () == ERROR_OPERATION_ABORTED) - prio = LM_DEBUG; -#else - if (result.error () == ECANCELED) - prio = LM_DEBUG; -#endif /* ACE_WIN32 */ - // Multiple steps to log the error without squashing errno. - ACE_LOG_MSG->conditional_set (__FILE__, - __LINE__, - -1, - (int)(result.error ())); - ACE_LOG_MSG->log (prio, - ACE_TEXT ("(%t) Master %p\n"), - ACE_TEXT ("recv")); - // If canceled, don't try to restart. 
- if (prio == LM_DEBUG) - return; + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("(%t) Badly formed Session request\n"))); } - this->start_recv (); -} - -void -Master::start_recv (void) -{ - if (this->mb_ == 0) - return; - - size_t unused = 0; - this->mb_->reset (); - if (this->rd_.recv (this->mb_, unused, 0) == -1) - ACE_ERROR ((LM_ERROR, ACE_TEXT ("(%t) Master %p\n"), ACE_TEXT ("recv"))); - else - this->recv_in_progress_ = true; } // *************************************************** @@ -1013,6 +963,8 @@ Server::go (ACE_HANDLE handle, const ACE_INET_Addr &client) // Lock this before initiating I/O, else it may complete while we're // still setting up. + bool has_io = false; + { ACE_GUARD (ACE_SYNCH_MUTEX, monitor, this->lock_); @@ -1026,9 +978,11 @@ Server::go (ACE_HANDLE handle, const ACE_INET_Addr &client) ACE_TEXT ("Server::ACE_Asynch_Read_Dgram::open"))); else this->initiate_read (); + + has_io = this->io_count_ > 0; } - if (this->io_count_ > 0) + if (has_io) return; delete this; // Error setting up I/O factories @@ -2016,34 +1970,8 @@ print_usage (int /* argc */, ACE_TCHAR *argv[]) static int set_proactor_type (const ACE_TCHAR *ptype) { - if (!ptype) - return 0; - - switch (ACE_OS::ace_toupper (*ptype)) - { - case 'D': - proactor_type = DEFAULT; - return 1; - case 'A': - proactor_type = AIOCB; - return 1; - case 'I': - proactor_type = SIG; - return 1; -#if defined (sun) - case 'S': - proactor_type = SUN; - return 1; -#endif /* sun */ -#if !defined (ACE_HAS_BROKEN_SIGEVENT_STRUCT) - case 'C': - proactor_type = CB; - return 1; -#endif /* !ACE_HAS_BROKEN_SIGEVENT_STRUCT */ - default: - break; - } - return 0; + return Proactor_Test_Backend::parse_type (ptype, proactor_type) == 0 + && Proactor_Test_Backend::is_available (proactor_type) != 0; } static int @@ -2141,16 +2069,21 @@ run_main (int argc, ACE_TCHAR *argv[]) MyTask task1; TestData test; + int started = 0; + int result = 0; + Master *master = 0; + Connector *connector = 0; if (task1.start (threads, proactor_type, 
max_aio_operations) == 0) { + started = 1; // NOTE - there's no real reason this test is limited to IPv4 other // than the way Session_Data is set up - to expand this test to work // on IPv6 as well as IPv4, you need to do some work on passing the // Session_Data address differently. ACE_INET_Addr addr (port, ACE_LOCALHOST, AF_INET); - Master master (&test, addr, clients); - Connector connector (&test); + ACE_NEW_RETURN (master, Master (&test, addr, clients), -1); + ACE_NEW_RETURN (connector, Connector (&test), -1); int rc = 0; if (both != 0 || host == 0) // Acceptor @@ -2167,28 +2100,60 @@ run_main (int argc, ACE_TCHAR *argv[]) if (addr.set (port, host, 1, addr.get_type ()) == -1) ACE_ERROR ((LM_ERROR, ACE_TEXT ("%p\n"), host)); else - rc += connector.start (addr, clients); + rc += connector->start (addr, clients); } - // Wait a few seconds to let things get going, then poll til - // all sessions are done. Note that when we exit this scope, the - // Acceptor and Connector will be destroyed, which should prevent - // further connections and also test how well destroyed handlers - // are handled. + // Let the sessions get going, then wait for them to drain while + // the master and connector are still alive. Destroying them + // earlier leaves callbacks racing with stack lifetime. 
ACE_OS::sleep (3); + + ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("(%t) Sleeping til sessions run down.\n"))); + ACE_Time_Value const drain_deadline = + ACE_OS::gettimeofday () + ACE_Time_Value (session_drain_timeout); + while (!test.testing_done ()) + { + if (ACE_OS::gettimeofday () >= drain_deadline) + { + ACE_ERROR ((LM_ERROR, + ACE_TEXT ("(%t) Timed out waiting %u seconds ") + ACE_TEXT ("for UDP sessions to drain.\n"), + session_drain_timeout)); + test.log_session_counts (); + result = -1; + break; + } + + ACE_OS::sleep (1); + } + + test.stop_all (); + + if (master != 0) + { + master->shutdown (); + ACE_OS::sleep (1); + } } - ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("(%t) Sleeping til sessions run down.\n"))); - while (!test.testing_done ()) - ACE_OS::sleep (1); + else + result = -1; - test.stop_all (); + if (started) + { + // Let canceled connect completions drain before stopping + // the proactor thread. + ACE_OS::sleep (1); + } ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("(%t) Stop Thread Pool Task\n"))); task1.stop (); + delete connector; + delete master; + ACE_END_TEST; - return 0; + return result; } #else diff --git a/ACE/tests/run_proactor_correctness_matrix.pl b/ACE/tests/run_proactor_correctness_matrix.pl new file mode 100755 index 0000000000000..286650621bb88 --- /dev/null +++ b/ACE/tests/run_proactor_correctness_matrix.pl @@ -0,0 +1,606 @@ +eval '(exit $?0)' && eval 'exec perl -S $0 ${1+"$@"}' + & eval 'exec perl -S $0 $argv:q' + if 0; + +# -*- perl -*- + +use strict; +use warnings; + +use Cwd qw(abs_path); +use Encode qw(encode); +use File::Basename qw(dirname); +use File::Copy qw(copy); +use File::Path qw(make_path); +use Getopt::Long qw(GetOptions); +use MIME::Base64 qw(encode_base64); +use POSIX qw(WNOHANG strftime); +use Text::ParseWords qw(shellwords); +use Time::HiRes qw(sleep time); + +# Return a default value when the requested setting is undefined. +sub value_or_default { + my ($value, $default) = @_; + return defined $value ? 
$value : $default; +} + +my $script_dir = abs_path(dirname($0)); +my $ace_root = abs_path("$script_dir/.."); +my $log_dir = "$script_dir/log"; +my $matrix_root = "$log_dir/proactor_matrix"; +my $run_id = $ENV{RUN_ID} || strftime('%Y%m%d-%H%M%S', localtime); +my $run_dir = "$matrix_root/$run_id"; +my $is_windows = ($^O eq 'MSWin32') ? 1 : 0; + +$ENV{ACE_ROOT} = $ace_root; +if ($is_windows) { + $ENV{PATH} = join( + ';', + grep { defined $_ && $_ ne '' } + ("$ace_root/lib", $script_dir, $ENV{PATH}) + ); +} else { + $ENV{LD_LIBRARY_PATH} = join( + ':', + grep { defined $_ && $_ ne '' } + ("$ace_root/lib", $script_dir, $ENV{LD_LIBRARY_PATH}) + ); +} + +my $timeout_secs = value_or_default($ENV{TIMEOUT_SECS}, 180); +my $base_port = value_or_default($ENV{BASE_PORT}, 20000); +my $include_default = value_or_default($ENV{INCLUDE_DEFAULT}, 0); +my $include_ipv6 = value_or_default($ENV{INCLUDE_IPV6}, 'auto'); +my $fail_fast = value_or_default($ENV{FAIL_FAST}, 0); +my $run_network_udp = value_or_default($ENV{RUN_NETWORK_UDP}, 1); +my $expected_fail_backends = value_or_default($ENV{EXPECTED_FAIL_BACKENDS}, ''); + +my @requested_tests; +my @requested_backends; +my $list_only = 0; +my $help = 0; + +# Print command-line usage and supported environment variables. +sub usage { + print <<'EOF'; +Usage: ./run_proactor_correctness_matrix.pl [options] + +Options: + --test Run only the named test case. May be repeated. + --backend Run only the named backend. May be repeated. + --list Print the resolved test/backend matrix and exit. + --help Show this help. + +Environment: + BASE_PORT=20000 Starting port for tests that accept -p. + TIMEOUT_SECS=180 Per-test timeout. + RUN_ID=