diff --git a/apps/qsimh_base_cuda.cu b/apps/qsimh_base_cuda.cu
new file mode 100644
index 000000000..951ad7688
--- /dev/null
+++ b/apps/qsimh_base_cuda.cu
@@ -0,0 +1,220 @@
+// Copyright 2026 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <unistd.h>
+#include <complex>
+#include <cstdint>
+#include <cstdlib>
+#include <limits>
+#include <string>
+#include <vector>
+
+#include "../lib/bitstring.h"
+#include "../lib/circuit_qsim_parser.h"
+#include "../lib/formux.h"
+#include "../lib/fuser_basic.h"
+#include "../lib/gates_qsim.h"
+#include "../lib/io_file.h"
+#include "../lib/run_qsimh.h"
+#include "../lib/simmux.h"
+#include "../lib/simulator_cuda.h"
+#include "../lib/util.h"
+#include "../lib/util_cpu.h"
+
+constexpr char usage[] =
+    "usage:\n  ./qsimh_base_cuda.x -c circuit_file "
+    "-d maximum_time -k part1_qubits "
+    "-w prefix -p num_prefix_gates -r num_root_gates "
+    "-t num_threads -n num_dblocks -v verbosity -z\n";
+
+struct Options {
+  std::string circuit_file;
+  std::vector<unsigned> part1;
+  uint64_t prefix;
+  unsigned maxtime = std::numeric_limits<unsigned>::max();
+  unsigned num_prefix_gatexs = 0;
+  unsigned num_root_gatexs = 0;
+  unsigned num_threads = 256;
+  unsigned num_dblocks = 16;
+  unsigned verbosity = 0;
+  bool denormals_are_zeros = false;
+};
+
+Options GetOptions(int argc, char* argv[]) {
+  Options opt;
+
+  int k;
+
+  auto to_int = [](const std::string& word) -> unsigned {
+    return std::stoul(word);
+  };
+
+  while ((k = getopt(argc, argv, "c:d:k:w:p:r:t:n:v:z")) != -1) {
+    switch (k) {
+      case 'c':
+        opt.circuit_file = optarg;
+        break;
+      case 'd':
+        opt.maxtime = std::stoul(optarg);
+        break;
+      case 'k':
+        qsim::SplitString(optarg, ',', to_int, opt.part1);
+        break;
+      case 'w':
+        opt.prefix = std::stoull(optarg);
+        break;
+      case 'p':
+        opt.num_prefix_gatexs = std::stoul(optarg);
+        break;
+      case 'r':
+        opt.num_root_gatexs = std::stoul(optarg);
+        break;
+      case 't':
+        opt.num_threads = std::stoul(optarg);
+        break;
+      case 'n':
+        opt.num_dblocks = std::stoul(optarg);
+        break;
+      case 'v':
+        opt.verbosity = std::stoul(optarg);
+        break;
+      case 'z':
+        opt.denormals_are_zeros = true;
+        break;
+      default:
+        qsim::IO::errorf(usage);
+        exit(1);
+    }
+  }
+
+  return opt;
+}
+
+bool ValidateOptions(const Options& opt) {
+  if (opt.circuit_file.empty()) {
+    qsim::IO::errorf("circuit file is not provided.\n");
+    qsim::IO::errorf(usage);
+    return false;
+  }
+
+  return true;
+}
+
+bool ValidatePart1(unsigned num_qubits, const std::vector<unsigned>& part1) {
+  for (std::size_t i = 0; i < part1.size(); ++i) {
+    if (part1[i] >= num_qubits) {
+      qsim::IO::errorf("part 1 qubit indices are too large.\n");
+      return false;
+    }
+  }
+
+  return true;
+}
+
+std::vector<unsigned> GetParts(
+    unsigned num_qubits, const std::vector<unsigned>& part1) {
+  std::vector<unsigned> parts(num_qubits, 0);
+
+  for (std::size_t i = 0; i < part1.size(); ++i) {
+    parts[part1[i]] = 1;
+  }
+
+  return parts;
+}
+
+int main(int argc, char* argv[]) {
+  using namespace qsim;
+
+  auto opt = GetOptions(argc, argv);
+  if (!ValidateOptions(opt)) {
+    return 1;
+  }
+
+  Circuit<GateQSim<float>> circuit;
+  if (!CircuitQsimParser<IOFile>::FromFile(
+          opt.maxtime, opt.circuit_file, circuit)) {
+    return 1;
+  }
+
+  if (!ValidatePart1(circuit.num_qubits, opt.part1)) {
+    return 1;
+  }
+  auto parts = GetParts(circuit.num_qubits, opt.part1);
+
+  if (opt.denormals_are_zeros) {
+    SetFlushToZeroAndDenormalsAreZeros();
+  }
+
+  uint64_t num_bitstrings =
+      std::min(uint64_t{8}, uint64_t{1} << circuit.num_qubits);
+
+  std::vector<uint64_t> bitstrings;
+  bitstrings.reserve(num_bitstrings);
+  for (std::size_t i = 0; i < num_bitstrings; ++i) {
+    bitstrings.push_back(i);
+  }
+
+  struct Factory {
+    using Simulator = qsim::SimulatorCUDA<float>;
+    using StateSpace = Simulator::StateSpace;
+    using fp_type = Simulator::fp_type;
+
+    Factory(const StateSpace::Parameter& param) : param(param) {}
+
+    StateSpace CreateStateSpace() const { return StateSpace(param); }
+
+    Simulator CreateSimulator() const { return Simulator(); }
+
+    const StateSpace::Parameter& param;
+  };
+
+  using HybridSimulator =
+      HybridSimulator<IO, GateQSim<float>, BasicGateFuser, For>;
+  using Runner = QSimHRunner<IO, HybridSimulator>;
+
+  Runner::Parameter param;
+  param.prefix = opt.prefix;
+  param.num_prefix_gatexs = opt.num_prefix_gatexs;
+  param.num_root_gatexs = opt.num_root_gatexs;
+  // opt.num_threads is used both here, for the hybrid runner, and below for
+  // the CUDA state-space parameters, which are set explicitly via cuda_param.
+  param.num_threads = opt.num_threads;
+  param.verbosity = opt.verbosity;
+
+  std::vector<std::complex<float>> results(num_bitstrings, 0);
+
+  // Set up the CUDA state-space parameters.
+  Factory::StateSpace::Parameter cuda_param;
+  cuda_param.num_threads = opt.num_threads;
+  cuda_param.num_dblocks = opt.num_dblocks;
+
+  Factory factory(cuda_param);
+
+  if (Runner::Run(param, factory, circuit, parts, bitstrings, results)) {
+    static constexpr char const* bits[8] = {
+        "000", "001", "010", "011", "100", "101", "110", "111",
+    };
+
+    unsigned s = 3 - std::min(unsigned{3}, circuit.num_qubits);
+
+    for (std::size_t i = 0; i < num_bitstrings; ++i) {
+      const auto& a = results[i];
+      qsim::IO::messagef(
+          "%s:%16.8g%16.8g%16.8g\n", bits[i] + s, std::real(a), std::imag(a),
+          std::norm(a));
+    }
+  }
+
+  return 0;
+}
diff --git a/lib/simulator_cuda.h b/lib/simulator_cuda.h
index 5743bea8b..de9106bcf 100644
--- a/lib/simulator_cuda.h
+++ b/lib/simulator_cuda.h
@@ -350,8 +350,8 @@ class SimulatorCUDA final {
 
     IndicesH<G> d_i(d_ws);
 
-    ApplyGateH_Kernel<G><<<blocks, threads>>>(
-        (fp_type*) d_ws, d_i.xss, d_i.ms, state.get());
+    ApplyGateH_Kernel<G><<<CreateGrid(blocks), threads>>>(
+        (fp_type*)d_ws, d_i.xss, d_i.ms, state.get());
   }
 
   template <unsigned G>
@@ -374,8 +374,8 @@ class SimulatorCUDA final {
 
     IndicesL<G> d_i(d_ws);
 
-    ApplyGateL_Kernel<G><<<blocks, threads>>>(
-        (fp_type*) d_ws, d_i.xss, d_i.ms, d_i.qis, d_i.tis,
+    ApplyGateL_Kernel<G><<<CreateGrid(blocks), threads>>>(
+        (fp_type*)d_ws, d_i.xss, d_i.ms, d_i.qis, d_i.tis,
         1 << num_effective_qs, state.get());
   }
 
@@ -407,8 +407,8 @@ class SimulatorCUDA final {
 
     IndicesH<G> d_i(d_ws);
 
-    ApplyControlledGateH_Kernel<G><<<blocks, threads>>>(
-        (fp_type*) d_ws, d_i.xss, d_i.ms, num_aqs + 1, cvalsh, state.get());
+    ApplyControlledGateH_Kernel<G><<<CreateGrid(blocks), threads>>>(
+        (fp_type*)d_ws, d_i.xss, d_i.ms, num_aqs + 1, cvalsh, state.get());
   }
 
   template <unsigned G>
@@ -432,9 +432,9 @@ class SimulatorCUDA final {
 
     IndicesL<G> d_i(d_ws);
 
-    ApplyControlledGateLH_Kernel<G><<<blocks, threads>>>(
-        (fp_type*) d_ws, d_i.xss, d_i.ms, d_i.qis, d_i.tis,
-        d.num_aqs + 1, d.cvalsh, 1 << d.num_effective_qs, state.get());
+    ApplyControlledGateLH_Kernel<G><<<CreateGrid(blocks), threads>>>(
+        (fp_type*)d_ws, d_i.xss, d_i.ms, d_i.qis, d_i.tis, d.num_aqs + 1,
+        d.cvalsh, 1 << d.num_effective_qs, state.get());
   }
 
   template <unsigned G>
@@ -458,8 +458,8 @@ class SimulatorCUDA final {
 
     IndicesLC<G> d_i(d_ws);
 
-    ApplyControlledGateL_Kernel<G><<<blocks, threads>>>(
-        (fp_type*) d_ws, d_i.xss, d_i.ms, d_i.qis, d_i.tis, d_i.cis,
+    ApplyControlledGateL_Kernel<G><<<CreateGrid(blocks), threads>>>(
+        (fp_type*)d_ws, d_i.xss, d_i.ms, d_i.qis, d_i.tis, d_i.cis,
         d.num_aqs + 1, d.cvalsh, 1 << d.num_effective_qs,
         1 << (5 - d.remaining_low_cqs), state.get());
   }
 
@@ -493,9 +493,9 @@ class SimulatorCUDA final {
 
     IndicesH<G> d_i(d_ws);
 
-    ExpectationValueH_Kernel<G><<<blocks, threads>>>(
-        (fp_type*) d_ws, d_i.xss, d_i.ms, num_iterations_per_block,
-        state.get(), Plus(), d_res1);
+    ExpectationValueH_Kernel<G><<<CreateGrid(blocks), threads>>>(
+        (fp_type*)d_ws, d_i.xss, d_i.ms, num_iterations_per_block, state.get(),
+        Plus(), d_res1);
 
     double mul = size == 1 ? 0.5 : 1.0;
 
@@ -531,8 +531,8 @@ class SimulatorCUDA final {
 
     IndicesL<G> d_i(d_ws);
 
-    ExpectationValueL_Kernel<G><<<blocks, threads>>>(
-        (fp_type*) d_ws, d_i.xss, d_i.ms, d_i.qis, d_i.tis,
+    ExpectationValueL_Kernel<G><<<CreateGrid(blocks), threads>>>(
+        (fp_type*)d_ws, d_i.xss, d_i.ms, d_i.qis, d_i.tis,
         num_iterations_per_block, state.get(), Plus(), d_res1);
 
     double mul = double(1 << (5 + num_effective_qs - G)) / 32;
@@ -895,6 +895,12 @@ class SimulatorCUDA final {
     return {cvalsh, num_aqs, num_effective_qs, remaining_low_cqs};
   }
 
+  static dim3 CreateGrid(uint64_t blocks) {
+    if (blocks <= 65535) return dim3(blocks);
+    uint32_t x = 65535;
+    uint32_t y = (blocks + x - 1) / x;
+    return dim3(x, y);
+  }
 
   void* AllocScratch(uint64_t size) const {
     if (size > scratch_size_) {
diff --git a/lib/simulator_cuda_kernels.h b/lib/simulator_cuda_kernels.h
index e21a9d62e..cfc9419b1 100644
--- a/lib/simulator_cuda_kernels.h
+++ b/lib/simulator_cuda_kernels.h
@@ -33,6 +33,9 @@ __global__ void ApplyGateH_Kernel(
     const idx_type* __restrict__ mss, fp_type* __restrict__ rstate) {
   // blockDim.x must be equal to 64.
 
+  uint64_t blockId = uint64_t{blockIdx.z} * gridDim.x * gridDim.y +
+                     uint64_t{blockIdx.y} * gridDim.x + blockIdx.x;
+
   static_assert(G < 7, "gates acting on more than 6 qubits are not supported.");
 
   constexpr unsigned gsize = 1 << G;
@@ -61,7 +64,7 @@ __global__ void ApplyGateH_Kernel(
 
   __syncthreads();
 
-  idx_type i = (64 * idx_type{blockIdx.x} + threadIdx.x) & 0xffffffffffe0;
+  idx_type i = (64 * idx_type{blockId} + threadIdx.x) & 0xffffffffffe0;
   idx_type ii = i & mss[0];
   for (unsigned j = 1; j <= G; ++j) {
     i *= 2;
@@ -115,6 +118,9 @@ __global__ void ApplyGateL_Kernel(
     fp_type* __restrict__ rstate) {
   // blockDim.x must be equal to 32.
 
+  uint64_t blockId = uint64_t{blockIdx.z} * gridDim.x * gridDim.y +
+                     uint64_t{blockIdx.y} * gridDim.x + blockIdx.x;
+
   static_assert(G < 7, "gates acting on more than 6 qubits are not supported.");
 
   constexpr unsigned gsize = 1 << G;
@@ -137,7 +143,7 @@ __global__ void ApplyGateL_Kernel(
     }
   }
 
-  idx_type i = 32 * idx_type{blockIdx.x};
+  idx_type i = 32 * idx_type{blockId};
   idx_type ii = i & mss[0];
   for (unsigned j = 1; j <= G; ++j) {
     i *= 2;
@@ -204,6 +210,9 @@ __global__ void ApplyControlledGateH_Kernel(
     fp_type* __restrict__ rstate) {
   // blockDim.x must be equal to 64.
 
+  uint64_t blockId = uint64_t{blockIdx.z} * gridDim.x * gridDim.y +
+                     uint64_t{blockIdx.y} * gridDim.x + blockIdx.x;
+
   static_assert(G < 7, "gates acting on more than 6 qubits are not supported.");
 
   constexpr unsigned gsize = 1 << G;
@@ -232,7 +241,7 @@ __global__ void ApplyControlledGateH_Kernel(
 
   __syncthreads();
 
-  idx_type i = (64 * idx_type{blockIdx.x} + threadIdx.x) & 0xffffffffffe0;
+  idx_type i = (64 * idx_type{blockId} + threadIdx.x) & 0xffffffffffe0;
   idx_type ii = i & mss[0];
   for (unsigned j = 1; j < num_mss; ++j) {
     i *= 2;
@@ -288,6 +297,9 @@ __global__ void ApplyControlledGateLH_Kernel(
     unsigned esize, fp_type* __restrict__ rstate) {
   // blockDim.x must be equal to 32.
 
+  uint64_t blockId = uint64_t{blockIdx.z} * gridDim.x * gridDim.y +
+                     uint64_t{blockIdx.y} * gridDim.x + blockIdx.x;
+
   static_assert(G < 7, "gates acting on more than 6 qubits are not supported.");
 
   constexpr unsigned gsize = 1 << G;
@@ -300,7 +312,7 @@ __global__ void ApplyControlledGateLH_Kernel(
   __shared__ fp_type rs0[32][gsize + 1], is0[32][gsize + 1];
   __shared__ fp_type v[2 * gsize * rows];
 
-  idx_type i = 32 * idx_type{blockIdx.x};
+  idx_type i = 32 * idx_type{blockId};
   idx_type ii = i & mss[0];
   for (unsigned j = 1; j < num_mss; ++j) {
     i *= 2;
@@ -381,6 +393,9 @@ __global__ void ApplyControlledGateL_Kernel(
     fp_type* __restrict__ rstate) {
   // blockDim.x must be equal to 32.
 
+  uint64_t blockId = uint64_t{blockIdx.z} * gridDim.x * gridDim.y +
+                     uint64_t{blockIdx.y} * gridDim.x + blockIdx.x;
+
   static_assert(G < 7, "gates acting on more than 6 qubits are not supported.");
 
   constexpr unsigned gsize = 1 << G;
@@ -393,7 +408,7 @@ __global__ void ApplyControlledGateL_Kernel(
   __shared__ fp_type rs0[32][gsize + 1], is0[32][gsize + 1];
   __shared__ fp_type v[2 * gsize * rows];
 
-  idx_type i = 32 * idx_type{blockIdx.x};
+  idx_type i = 32 * idx_type{blockId};
   idx_type ii = i & mss[0];
   for (unsigned j = 1; j < num_mss; ++j) {
     i *= 2;
@@ -477,6 +492,9 @@ __global__ void ExpectationValueH_Kernel(
     const fp_type* __restrict__ rstate, Op op, cfp_type* __restrict__ result) {
   // blockDim.x must be equal to 64.
 
+  uint64_t blockId = uint64_t{blockIdx.z} * gridDim.x * gridDim.y +
+                     uint64_t{blockIdx.y} * gridDim.x + blockIdx.x;
+
   static_assert(G < 7, "gates acting on more than 6 qubits are not supported.");
 
   constexpr unsigned gsize = 1 << G;
@@ -508,7 +526,7 @@ __global__ void ExpectationValueH_Kernel(
   double im = 0;
 
   for (unsigned iter = 0; iter < num_iterations_per_block; ++iter) {
-    idx_type b = num_iterations_per_block * idx_type{blockIdx.x} + iter;
+    idx_type b = num_iterations_per_block * idx_type{blockId} + iter;
     idx_type i = (64 * b + threadIdx.x) & 0xffffffffffe0;
     idx_type ii = i & mss[0];
 
@@ -573,8 +591,8 @@ __global__ void ExpectationValueH_Kernel(
   __syncthreads();
 
   if (threadIdx.x == 0) {
-    result[blockIdx.x].re = partial2[0].re + partial2[1].re;
-    result[blockIdx.x].im = partial2[0].im + partial2[1].im;
+    result[blockId].re = partial2[0].re + partial2[1].re;
+    result[blockId].im = partial2[0].im + partial2[1].im;
   }
 }
 
@@ -587,6 +605,9 @@ __global__ void ExpectationValueL_Kernel(
     const fp_type* __restrict__ rstate, Op op, cfp_type* __restrict__ result) {
   // blockDim.x must be equal to 32.
 
+  uint64_t blockId = uint64_t{blockIdx.z} * gridDim.x * gridDim.y +
+                     uint64_t{blockIdx.y} * gridDim.x + blockIdx.x;
+
   static_assert(G < 7, "gates acting on more than 6 qubits are not supported.");
 
   constexpr unsigned gsize = 1 << G;
@@ -612,7 +633,7 @@ __global__ void ExpectationValueL_Kernel(
   double im = 0;
 
   for (idx_type iter = 0; iter < num_iterations_per_block; ++iter) {
-    idx_type i = 32 * (num_iterations_per_block * idx_type{blockIdx.x} + iter);
+    idx_type i = 32 * (num_iterations_per_block * idx_type{blockId} + iter);
     idx_type ii = i & mss[0];
     for (unsigned j = 1; j <= G; ++j) {
       i *= 2;
@@ -673,8 +694,8 @@ __global__ void ExpectationValueL_Kernel(
   auto val = WarpReduce(partial[threadIdx.x], op);
 
   if (threadIdx.x == 0) {
-    result[blockIdx.x].re = val.re;
-    result[blockIdx.x].im = val.im;
+    result[blockId].re = val.re;
+    result[blockId].im = val.im;
   }
 }
 
diff --git a/lib/statespace_cuda.h b/lib/statespace_cuda.h
index 660db074c..865ea006b 100644
--- a/lib/statespace_cuda.h
+++ b/lib/statespace_cuda.h
@@ -50,6 +50,7 @@ class StateSpaceCUDA :
     unsigned threads;
     unsigned dblocks;
     unsigned blocks;
+    dim3 device_grid;
   };
 
  public:
@@ -82,6 +83,16 @@ class StateSpaceCUDA :
     return std::max(uint64_t{64}, 2 * (uint64_t{1} << num_qubits));
   };
 
+  bool IsNull(const State& state) const { return state.get() == nullptr; }
+
+  void DeviceSync() const { ErrorCheck(cudaDeviceSynchronize()); }
+
+  void Copy(const State& src, State& dest) const {
+    ErrorCheck(cudaMemcpy(
+        dest.get(), src.get(), MinSize(src.num_qubits()) * sizeof(fp_type),
+        cudaMemcpyDeviceToDevice));
+  }
+
   void InternalToNormalOrder(State& state) const {
     uint64_t size = MinSize(state.num_qubits()) / 2;
 
@@ -89,7 +100,8 @@ class StateSpaceCUDA :
     unsigned blocks = size / threads;
     unsigned bytes = 2 * threads * sizeof(fp_type);
 
-    InternalToNormalOrderKernel<<<blocks, threads, bytes>>>(state.get());
+    InternalToNormalOrderKernel<<<CreateGrid(blocks), threads, bytes>>>(
+        state.get());
     ErrorCheck(cudaPeekAtLastError());
     ErrorCheck(cudaDeviceSynchronize());
   }
@@ -101,7 +113,8 @@ class StateSpaceCUDA :
     unsigned blocks = size / threads;
     unsigned bytes = 2 * threads * sizeof(fp_type);
 
-    NormalToInternalOrderKernel<<<blocks, threads, bytes>>>(state.get());
+    NormalToInternalOrderKernel<<<CreateGrid(blocks), threads, bytes>>>(
+        state.get());
     ErrorCheck(cudaPeekAtLastError());
     ErrorCheck(cudaDeviceSynchronize());
   }
@@ -121,7 +134,8 @@ class StateSpaceCUDA :
 
     fp_type v = double{1} / std::sqrt(hsize);
 
-    SetStateUniformKernel<<<blocks, threads>>>(v, hsize, state.get());
+    SetStateUniformKernel<<<CreateGrid(blocks), threads>>>(
+        v, hsize, state.get());
     ErrorCheck(cudaPeekAtLastError());
     ErrorCheck(cudaDeviceSynchronize());
   }
@@ -180,7 +194,7 @@ class StateSpaceCUDA :
     unsigned threads = std::min(size, uint64_t{param_.num_threads});
     unsigned blocks = size / threads;
 
-    BulkSetAmplKernel<<<blocks, threads>>>(
+    BulkSetAmplKernel<<<CreateGrid(blocks), threads>>>(
         mask, bits, re, im, exclude, state.get());
     ErrorCheck(cudaPeekAtLastError());
     ErrorCheck(cudaDeviceSynchronize());
@@ -197,7 +211,7 @@ class StateSpaceCUDA :
     unsigned threads = std::min(size, uint64_t{param_.num_threads});
     unsigned blocks = size / threads;
 
-    AddKernel<<<blocks, threads>>>(src.get(), dest.get());
+    AddKernel<<<CreateGrid(blocks), threads>>>(src.get(), dest.get());
     ErrorCheck(cudaPeekAtLastError());
     ErrorCheck(cudaDeviceSynchronize());
 
@@ -211,7 +225,7 @@ class StateSpaceCUDA :
     unsigned threads = std::min(size, uint64_t{param_.num_threads});
     unsigned blocks = size / threads;
 
-    MultiplyKernel<<<blocks, threads>>>(a, state.get());
+    MultiplyKernel<<<CreateGrid(blocks), threads>>>(a, state.get());
     ErrorCheck(cudaPeekAtLastError());
     ErrorCheck(cudaDeviceSynchronize());
   }
@@ -262,7 +276,7 @@ class StateSpaceCUDA :
     auto op1 = RealProduct();
     auto op2 = Plus();
 
-    Reduce1Kernel<<<g1.blocks, g1.threads, bytes>>>(
+    Reduce1Kernel<<<g1.device_grid, g1.threads, bytes>>>(
         g1.dblocks, op1, op2, op2, state.get(), state.get(), d_res1);
     ErrorCheck(cudaPeekAtLastError());
     ErrorCheck(cudaDeviceSynchronize());
@@ -278,7 +292,7 @@ class StateSpaceCUDA :
 
     auto op3 = Plus();
 
-    Reduce2Kernel<<<g2.blocks, g2.threads, bytes>>>(
+    Reduce2Kernel<<<g2.device_grid, g2.threads, bytes>>>(
         g2.dblocks, g1.blocks, op3, op3, d_res1, d_res2);
     ErrorCheck(cudaPeekAtLastError());
     ErrorCheck(cudaDeviceSynchronize());
@@ -321,7 +335,8 @@ class StateSpaceCUDA :
     unsigned threads = std::min(size, uint64_t{param_.num_threads});
     unsigned blocks = size / threads;
 
-    CollapseKernel<<<blocks, threads>>>(mr.mask, mr.bits, renorm, state.get());
+    CollapseKernel<<<CreateGrid(blocks), threads>>>(
+        mr.mask, mr.bits, renorm, state.get());
     ErrorCheck(cudaPeekAtLastError());
     ErrorCheck(cudaDeviceSynchronize());
   }
@@ -337,7 +352,7 @@ class StateSpaceCUDA :
     auto op1 = RealProduct();
     auto op2 = Plus();
 
-    Reduce1Kernel<<<g.blocks, g.threads, bytes>>>(
+    Reduce1Kernel<<<g.device_grid, g.threads, bytes>>>(
         g.dblocks, op1, op2, op2, state.get(), state.get(), d_res);
     ErrorCheck(cudaPeekAtLastError());
     ErrorCheck(cudaDeviceSynchronize());
@@ -391,6 +406,7 @@ class StateSpaceCUDA :
     grid.threads = std::min(size, uint64_t{param_.num_threads});
     grid.dblocks = std::min(size / grid.threads, uint64_t{param_.num_dblocks});
    grid.blocks = size / (grid.threads * grid.dblocks);
+    grid.device_grid = CreateGrid(grid.blocks);
 
     return grid;
   }
@@ -401,6 +417,7 @@ class StateSpaceCUDA :
     grid.threads = std::min(param_.num_threads, std::max(32U, size));
     grid.dblocks = std::max(1U, size / grid.threads);
     grid.blocks = 1;
+    grid.device_grid = dim3(1, 1, 1);
 
     return grid;
   }
@@ -426,10 +443,10 @@ class StateSpaceCUDA :
     auto op3 = Plus::type>();
 
     if (mask == 0) {
-      Reduce1Kernel<<<g1.blocks, g1.threads, bytes>>>(
+      Reduce1Kernel<<<g1.device_grid, g1.threads, bytes>>>(
           g1.dblocks, op1, op2, op3, state1.get(), state2.get(), d_res1);
     } else {
-      Reduce1MaskedKernel<<<g1.blocks, g1.threads, bytes>>>(
+      Reduce1MaskedKernel<<<g1.device_grid, g1.threads, bytes>>>(
          g1.dblocks, mask, bits, op1, op2, op3, state1.get(), state2.get(),
           d_res1);
     }
@@ -448,7 +465,7 @@ class StateSpaceCUDA :
     auto op2 = Plus();
     auto op3 = Plus::type>();
 
-    Reduce2Kernel<<<g2.blocks, g2.threads, bytes>>>(
+    Reduce2Kernel<<<g2.device_grid, g2.threads, bytes>>>(
         g2.dblocks, g1.blocks, op2, op3, d_res1, d_res2);
     ErrorCheck(cudaPeekAtLastError());
     ErrorCheck(cudaDeviceSynchronize());
diff --git a/lib/statespace_cuda_kernels.h b/lib/statespace_cuda_kernels.h
index 0bc4ba706..73c3f97d5 100644
--- a/lib/statespace_cuda_kernels.h
+++ b/lib/statespace_cuda_kernels.h
@@ -39,7 +39,7 @@ __device__ __forceinline__ FP1 BlockReduce1(
   unsigned warp = threadIdx.x / warp_size;
   unsigned lane = threadIdx.x % warp_size;
 
-  uint64_t k0 = 2 * n * blockIdx.x * blockDim.x + 2 * tid - lane;
+  uint64_t k0 = 2 * n * qsim::GetBlockId() * blockDim.x + 2 * tid - lane;
   uint64_t k1 = k0 + 2 * n * blockDim.x;
 
   FP1 r;
@@ -88,7 +88,7 @@ __device__ __forceinline__ FP1 BlockReduce1Masked(
   unsigned warp = threadIdx.x / warp_size;
   unsigned lane = threadIdx.x % warp_size;
 
-  uint64_t k0 = 2 * n * blockIdx.x * blockDim.x + 2 * tid - lane;
+  uint64_t k0 = 2 * n * qsim::GetBlockId() * blockDim.x + 2 * tid - lane;
   uint64_t k1 = k0 + 2 * n * blockDim.x;
 
   FP1 r = 0;
@@ -137,7 +137,7 @@ __device__ __forceinline__ FP1 BlockReduce2(
   FP1* partial1 = (FP1*) shared;
 
   unsigned tid = threadIdx.x;
-  uint64_t k0 = n * blockIdx.x * blockDim.x + tid;
+  uint64_t k0 = n * qsim::GetBlockId() * blockDim.x + tid;
   uint64_t k1 = k0 + n * blockDim.x;
 
   FP1 r = 0;
@@ -185,7 +185,7 @@ __global__ void Reduce1Kernel(uint64_t n, Op1 op1, Op2 op2, Op3 op3,
   FP1 sum = detail::BlockReduce1(n, op1, op2, op3, s1, s2);
 
   if (threadIdx.x == 0) {
-    result[blockIdx.x] = sum;
+    result[qsim::GetBlockId()] = sum;
   }
 }
 
@@ -198,7 +198,7 @@ __global__ void Reduce1MaskedKernel(uint64_t n, uint64_t mask, uint64_t bits,
       detail::BlockReduce1Masked(n, mask, bits, op1, op2, op3, s1, s2);
 
   if (threadIdx.x == 0) {
-    result[blockIdx.x] = sum;
+    result[qsim::GetBlockId()] = sum;
   }
 }
 
@@ -209,7 +209,7 @@ __global__ void Reduce2Kernel(
   FP1 sum = detail::BlockReduce2(n, size, op2, op3, s);
 
   if (threadIdx.x == 0) {
-    result[blockIdx.x] = sum;
+    result[qsim::GetBlockId()] = sum;
   }
 }
 
@@ -217,7 +217,7 @@ template
 __global__ void InternalToNormalOrderKernel(FP* state) {
   unsigned lane = threadIdx.x % warp_size;
   unsigned l = 2 * threadIdx.x - lane;
-  uint64_t k = 2 * uint64_t{blockIdx.x} * blockDim.x + l;
+  uint64_t k = 2 * uint64_t{qsim::GetBlockId()} * blockDim.x + l;
 
   extern __shared__ float shared[];
   FP* buf = (FP*) shared;
@@ -235,7 +235,7 @@ template
 __global__ void NormalToInternalOrderKernel(FP* state) {
   unsigned lane = threadIdx.x % warp_size;
   unsigned l = 2 * threadIdx.x - lane;
-  uint64_t k = 2 * uint64_t{blockIdx.x} * blockDim.x + l;
+  uint64_t k = 2 * uint64_t{qsim::GetBlockId()} * blockDim.x + l;
 
   extern __shared__ float shared[];
   FP* buf = (FP*) shared;
@@ -252,7 +252,7 @@ __global__ void NormalToInternalOrderKernel(FP* state) {
 template <typename FP>
 __global__ void SetStateUniformKernel(FP v, uint64_t size, FP* state) {
   unsigned lane = threadIdx.x % warp_size;
-  uint64_t k = 2 * (uint64_t{blockIdx.x} * blockDim.x + threadIdx.x) - lane;
+  uint64_t k = 2 * (uint64_t{qsim::GetBlockId()} * blockDim.x + threadIdx.x) - lane;
 
   state[k] = lane < size ? v : 0;
   state[k + warp_size] = 0;
@@ -260,19 +260,19 @@ __global__ void SetStateUniformKernel(FP v, uint64_t size, FP* state) {
 
 template <typename FP>
 __global__ void AddKernel(const FP* state1, FP* state2) {
-  uint64_t k = uint64_t{blockIdx.x} * blockDim.x + threadIdx.x;
+  uint64_t k = uint64_t{qsim::GetBlockId()} * blockDim.x + threadIdx.x;
   state2[k] += state1[k];
 }
 
 template <typename FP>
 __global__ void MultiplyKernel(FP a, FP* state) {
-  uint64_t k = uint64_t{blockIdx.x} * blockDim.x + threadIdx.x;
+  uint64_t k = uint64_t{qsim::GetBlockId()} * blockDim.x + threadIdx.x;
   state[k] *= a;
 }
 
 template <typename FP>
 __global__ void CollapseKernel(uint64_t mask, uint64_t bits, FP r, FP* state) {
-  uint64_t k1 = uint64_t{blockIdx.x} * blockDim.x + threadIdx.x;
+  uint64_t k1 = uint64_t{qsim::GetBlockId()} * blockDim.x + threadIdx.x;
   uint64_t k2 = 2 * k1 - threadIdx.x % warp_size;
 
   if ((k1 & mask) == bits) {
@@ -287,7 +287,7 @@ __global__ void CollapseKernel(uint64_t mask, uint64_t bits, FP r, FP* state) {
 template <typename FP>
 __global__ void BulkSetAmplKernel(
     uint64_t mask, uint64_t bits, FP re, FP im, bool exclude, FP* state) {
-  uint64_t k1 = uint64_t{blockIdx.x} * blockDim.x + threadIdx.x;
+  uint64_t k1 = uint64_t{qsim::GetBlockId()} * blockDim.x + threadIdx.x;
   uint64_t k2 = 2 * k1 - threadIdx.x % warp_size;
 
   bool set = ((k1 & mask) == bits) ^ exclude;
diff --git a/lib/vectorspace_cuda.h b/lib/vectorspace_cuda.h
index 5cfd4e834..b1b4b81cd 100644
--- a/lib/vectorspace_cuda.h
+++ b/lib/vectorspace_cuda.h
@@ -86,9 +86,9 @@ class VectorSpaceCUDA {
 
   static Vector Create(unsigned num_qubits) {
     fp_type* p;
-    auto size = sizeof(fp_type) * Impl::MinSize(num_qubits);
+    uint64_t size = uint64_t{sizeof(fp_type)} * Impl::MinSize(num_qubits);
     auto rc = cudaMalloc(&p, size);
-    
+
     if (rc == cudaSuccess) {
       return Vector{Pointer{(fp_type*) p, &detail::free}, num_qubits};
     } else {
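Reviewer note on the recurring pattern in this patch: kernel launches no longer assume the whole launch fits into blockIdx.x. Block counts above 65535 are folded into a two-dimensional grid (CreateGrid in simulator_cuda.h, device_grid in the state-space Grid struct), and each kernel rebuilds a flat block index from (blockIdx.z, blockIdx.y, blockIdx.x), either inline via the local blockId variable or through qsim::GetBlockId(), whose definition lies outside this excerpt. The sketch below shows the pattern in isolation under assumed names (MakeGrid, LinearBlockId and FillKernel are illustrative, not qsim identifiers); the bounds check covers the padded tail that appears when the block count is not a multiple of 65535.

// Standalone sketch of the 2-D grid fold (compile with nvcc); assumed names,
// not part of qsim.
#include <cstdint>
#include <cstdio>

// Host side: fold a 1-D block count into an (x, y) grid so that no single
// grid dimension exceeds 65535.
static dim3 MakeGrid(uint64_t blocks) {
  if (blocks <= 65535) return dim3(blocks);
  uint32_t x = 65535;
  uint32_t y = (blocks + x - 1) / x;
  return dim3(x, y);
}

// Device side: linearize (z, y, x) back into the original flat block index.
__device__ __forceinline__ uint64_t LinearBlockId() {
  return uint64_t{blockIdx.z} * gridDim.x * gridDim.y +
         uint64_t{blockIdx.y} * gridDim.x + blockIdx.x;
}

__global__ void FillKernel(uint64_t n, float* out) {
  uint64_t k = LinearBlockId() * blockDim.x + threadIdx.x;
  if (k < n) out[k] = 1.0f;  // guard against the padded tail of the 2-D grid
}

int main() {
  uint64_t n = uint64_t{1} << 24;  // 65536 blocks of 256 threads
  unsigned threads = 256;
  uint64_t blocks = (n + threads - 1) / threads;

  float* d_out = nullptr;
  cudaMalloc(&d_out, n * sizeof(float));
  FillKernel<<<MakeGrid(blocks), threads>>>(n, d_out);
  cudaDeviceSynchronize();
  cudaFree(d_out);

  std::printf("launched %llu logical blocks\n", (unsigned long long) blocks);
  return 0;
}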