Why does a CUDA reduction fail if I use 31 blocks?


The following CUDA code takes a list of labels (0, 1, 2, 3, ...) and finds the sums of the weights associated with those labels.

To accelerate the calculation I use shared memory so that each thread keeps its own running sums. At the end of the calculation, I perform a CUB block-wide reduction and then an atomic add to global memory.

The CPU and GPU agree on the results if I use 30 or fewer blocks, but disagree if I use more than that. Why is this, and how can I fix it?

Checking error codes in the code doesn't turn up anything, and cuda-gdb and cuda-memcheck do not show any uncaught errors or memory issues.

I'm using NVCC v10.1.243 and running on an Nvidia Quadro P2000.

MWE

//Compile with, e.g., nvcc -I /z/downloads/cub-1.8.0/ cuda_reduction.cu -arch=sm_61
#include <algorithm>
#include <cub/cub.cuh>
#include <thrust/device_vector.h>

#include <random>



__global__ void group_summer(
  const int32_t *const labels,
  const float   *const weights,
  const int            num_elements,
  const int            num_classes,
  double   *const      sums,
  uint32_t *const      counts
){
  constexpr int num_threads = 128;
  assert(num_threads==blockDim.x);

  //Get shared memory
  extern __shared__ int s[];
  double   *const sums_shmem = (double*)s;
  uint32_t *const counts_shmem = (uint32_t*)&sums_shmem[num_threads*num_classes];

  double   *const my_sums   = &sums_shmem  [num_classes*threadIdx.x];
  uint32_t *const my_counts = &counts_shmem[num_classes*threadIdx.x];

  for(int i=0;i<num_threads*num_classes;i+=num_threads){
    sums_shmem[i] = 0;
    counts_shmem[i] = 0;
  }
  __syncthreads();

  for(int i=blockIdx.x * blockDim.x + threadIdx.x;i<num_elements;i+=gridDim.x*blockDim.x){
    // printf("Thread %d at %d looking at %d with %f at %ld and %ld\n",threadIdx.x,i,labels[i],weights[i],(long int)&my_counts[i],(long int)&my_sums[i]);
    const auto l = labels[i];
    // printf("Before thread %d at %d Now has %d counts and %lf sums\n",my_counts[l],my_sums[l]);
    my_sums[l] += weights[i];
    my_counts[l]++;
    // printf("After thread %d at %d Now has %d counts and %lf sums\n",my_sums[l]);
  }

  __syncthreads();

  __shared__ cub::BlockReduce<double,num_threads>::TempStorage double_temp_storage;
  __shared__ cub::BlockReduce<uint32_t,num_threads>::TempStorage uint32_t_temp_storage;
  for(int l=0;l<num_classes;l++){
    // printf("Thread %d has %d counts with total weight %f for label %d\n",my_sums[l],l);
    const auto sums_total   = cub::BlockReduce<double,num_threads>(double_temp_storage).Reduce(my_sums[l],cub::Sum());
    const auto counts_total = cub::BlockReduce<uint32_t,num_threads>(uint32_t_temp_storage).Reduce(my_counts[l],cub::Sum());
    if(threadIdx.x==0){
      atomicAdd(&sums[l],sums_total);
      atomicAdd(&counts[l],counts_total);
    }
  }
}

void group_summer_cpu(
  const std::vector<int32_t>  &labels,
  const std::vector<float>    &weights,
  std::vector<double>         &sums,
  std::vector<uint32_t>       &counts
){
  for(int i=0;i<labels.size();i++){
    const auto l = labels[i];
    sums[l] += weights[i];
    counts[l]++;
  }
}

template<class T>
bool vec_nearly_equal(const std::vector<T> &a,const std::vector<T> &b){
  if(a.size()!=b.size())
    return false;

  for(size_t i=0;i<a.size();i++){
    if(std::abs(a[i]-b[i])>1e-4)
      return false;
  }

  return true;
}

void TestGroupSummer(std::mt19937 &gen,const int N,const int label_max,const int num_blocks){
  std::vector<int32_t> labels(N);
  std::vector<float>   weights(N);

  std::uniform_int_distribution<int> label_dist(0,label_max);
  std::uniform_real_distribution<float> weight_dist(0,5000);

  for(int i=0;i<N;i++){
    labels[i] = label_dist(gen);
    weights[i] = weight_dist(gen);
  }

  // for(const auto &x: labels) std::cout<<x<<" "; std::cout<<std::endl;
  // for(const auto &x: weights) std::cout<<x<<" "; std::cout<<std::endl;

  const int num_classes = 1 + *std::max_element(labels.begin(),labels.end());

  thrust::device_vector<int32_t>   d_labels(labels.size());
  thrust::device_vector<float>     d_weights(labels.size());
  thrust::device_vector<double>    d_sums(num_classes);
  thrust::device_vector<uint32_t>  d_counts(num_classes);

  thrust::copy(labels.begin(),labels.end(),d_labels.begin());
  thrust::copy(weights.begin(),weights.end(),d_weights.begin());

  constexpr int num_threads = 128;
  const int shmem = num_threads * num_classes * (sizeof(double)+sizeof(uint32_t));

  std::cout<<"Num blocks:    "<<num_blocks<<std::endl;
  std::cout<<"Shared memory: "<<shmem<<std::endl;

  group_summer<<<num_blocks,num_threads,shmem>>>(
    thrust::raw_pointer_cast(d_labels.data()),
    thrust::raw_pointer_cast(d_weights.data()),
    labels.size(),
    num_classes,
    thrust::raw_pointer_cast(d_sums.data()),
    thrust::raw_pointer_cast(d_counts.data())
  );
  if(cudaGetLastError()!=CUDA_SUCCESS){
    std::cout<<"Kernel Failed to launch!"<<std::endl;
  }
  cudaDeviceSynchronize();
  if(cudaGetLastError()!=CUDA_SUCCESS){
    std::cout<<"Error in kernel!"<<std::endl;
  }

  std::vector<double>   h_sums(num_classes);
  std::vector<uint32_t> h_counts(num_classes);

  thrust::copy(d_sums.begin(),d_sums.end(),h_sums.begin());
  thrust::copy(d_counts.begin(),d_counts.end(),h_counts.begin());

  std::vector<double>   correct_sums(num_classes);
  std::vector<uint32_t> correct_counts(num_classes);

  group_summer_cpu(labels,weights,correct_sums,correct_counts);

  std::cout<<"Sums good? "  <<vec_nearly_equal(h_sums,correct_sums)<<std::endl;
  std::cout<<"Counts good? "<<(h_counts==correct_counts)<<std::endl;

  std::cout<<"GPU Sums: ";   for(const auto &x: h_sums)         std::cout<<x<<" "; std::cout<<std::endl;
  std::cout<<"cpu Sums: ";   for(const auto &x: correct_sums)   std::cout<<x<<" "; std::cout<<std::endl;
  std::cout<<"GPU Counts: "; for(const auto &x: h_counts)       std::cout<<x<<" "; std::cout<<std::endl;
  std::cout<<"cpu Counts: "; for(const auto &x: correct_counts) std::cout<<x<<" "; std::cout<<std::endl;
}


int main(){
  std::mt19937 gen;

  //These all work
  TestGroupSummer(gen,1000000,10,30);
  TestGroupSummer(gen,1000000,10,30);

  //This fails
  TestGroupSummer(gen,1000000,10,31);
}

Solution

When I run your code on a Tesla V100, all the results are failures except the first test.

You have a problem here:

  for(int i=0;i<num_threads*num_classes;i+=num_threads){
    sums_shmem[i] = 0;
    counts_shmem[i] = 0;
  }

That does not correctly zero the shared memory: every thread writes the same strided slots (0, num_threads, 2*num_threads, ...), so most of the per-thread regions are never initialized. You need to change i=0 to i=threadIdx.x.
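With that change, each thread zeroes a strided subset of the shared arrays, and the block as a whole covers every slot:

  //Each thread zeroes slots threadIdx.x, threadIdx.x+num_threads, ..., so
  //collectively the block initializes all num_threads*num_classes entries
  for(int i=threadIdx.x;i<num_threads*num_classes;i+=num_threads){
    sums_shmem[i] = 0;
    counts_shmem[i] = 0;
  }
  __syncthreads();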

When I make that change, everything passes for me.

As an aside, this is not correct:

if(cudaGetLastError()!=CUDA_SUCCESS)

CUDA_SUCCESS is not the correct enum token to use with the runtime API. You should use cudaSuccess instead (there are 2 instances of this).
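A minimal sketch of the corrected checks, also printing the error string via cudaGetErrorString (the message wording is just illustrative, not from the original):

  //cudaSuccess is the runtime-API success code; CUDA_SUCCESS belongs to the driver API
  cudaError_t err = cudaGetLastError();
  if(err!=cudaSuccess){
    std::cout<<"Kernel failed to launch: "<<cudaGetErrorString(err)<<std::endl;
  }
  cudaDeviceSynchronize();
  err = cudaGetLastError();
  if(err!=cudaSuccess){
    std::cout<<"Error in kernel: "<<cudaGetErrorString(err)<<std::endl;
  }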

I also think your error comparison is troublesome:

if(std::abs(a[i]-b[i])>1e-4)

but that doesn't seem to be an issue here. I would normally expect to see some scaling prior to the test (i.e., a relative rather than a purely absolute tolerance).
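For illustration, a scaled version of the comparison might look like this sketch (the 1e-4 relative tolerance and the floor of 1 are assumptions, not values from the original):

#include <algorithm>
#include <cmath>
#include <vector>

template<class T>
bool vec_nearly_equal(const std::vector<T> &a,const std::vector<T> &b){
  if(a.size()!=b.size())
    return false;

  for(size_t i=0;i<a.size();i++){
    //Scale the tolerance by the magnitude of the values being compared, so
    //large sums are not held to an unreasonably tight absolute bound
    const T scale = std::max<T>(T(1),std::max(std::abs(a[i]),std::abs(b[i])));
    if(std::abs(a[i]-b[i])>1e-4*scale)
      return false;
  }

  return true;
}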
