
c – Why is this SIMD multiplication not faster than non-SIMD multiplication?

Let's assume we have a function that multiplies two arrays of 1,000,000 doubles each. In C/C++ the function looks like this:
void mul_c(double* a,double* b)
{
    for (int i = 0; i != 1000000; ++i)
    {
        a[i] = a[i] * b[i];
    }
}

The compiler, with -O2, generates the following assembly:

mul_c(double*,double*):
        xor     eax,eax
.L2:
        movsd   xmm0,QWORD PTR [rdi+rax]
        mulsd   xmm0,QWORD PTR [rsi+rax]
        movsd   QWORD PTR [rdi+rax],xmm0
        add     rax,8
        cmp     rax,8000000
        jne     .L2
        rep ret

From the assembly above, it looks like the compiler uses SIMD instructions, but it only multiplies one double per iteration. So I decided to write the same function in inline assembly instead, where I make full use of the xmm0 register and multiply two doubles in one go:

void mul_asm(double* a,double* b)
{
    asm volatile
    (
        ".intel_Syntax noprefix             \n\t"
        "xor    rax,rax                    \n\t"
        "0:                                 \n\t"
        "movupd xmm0,xmmword ptr [rdi+rax] \n\t"
        "mulpd  xmm0,xmmword ptr [rsi+rax] \n\t"
        "movupd xmmword ptr [rdi+rax],xmm0 \n\t"
        "add    rax,16                     \n\t"
        "cmp    rax,8000000                \n\t"
        "jne    0b                          \n\t"
        ".att_Syntax noprefix               \n\t"

        : 
        : "D" (a),"S" (b)
        : "memory","cc"
    );
}

After measuring the execution time of both functions, it appears that both take 1 ms to complete:

> gcc -O2 main.cpp
> ./a.out < input

mul_c: 1 ms
mul_asm: 1 ms

[a lot of doubles...]

I expected the SIMD implementation to be at least twice as fast (taking 0 ms), since there are only half as many multiply/memory instructions.

So my question is: why isn't the SIMD implementation faster than the ordinary C/C++ implementation when the SIMD implementation only executes half as many multiply/memory instructions?

Here is the full program:

#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>

void mul_c(double* a,double* b)
{
    for (int i = 0; i != 1000000; ++i)
    {
        a[i] = a[i] * b[i];
    }
}

void mul_asm(double* a,double* b)
{
    asm volatile
    (
        ".intel_syntax noprefix             \n\t"
        "xor    rax,rax                    \n\t"
        "0:                                 \n\t"
        "movupd xmm0,xmmword ptr [rdi+rax] \n\t"
        "mulpd  xmm0,xmmword ptr [rsi+rax] \n\t"
        "movupd xmmword ptr [rdi+rax],xmm0 \n\t"
        "add    rax,16                     \n\t"
        "cmp    rax,8000000                \n\t"
        "jne    0b                          \n\t"
        ".att_syntax noprefix               \n\t"

        : 
        : "D" (a),"S" (b)
        : "memory","cc"
    );
}

int main()
{
    struct timeval t1;
    struct timeval t2;
    unsigned long long time;

    double* a = (double*)malloc(sizeof(double) * 1000000);
    double* b = (double*)malloc(sizeof(double) * 1000000);
    double* c = (double*)malloc(sizeof(double) * 1000000);

    for (int i = 0; i != 1000000; ++i)
    {
        double v;
        scanf("%lf",&v);
        a[i] = v;
        b[i] = v;
        c[i] = v;
    }

    gettimeofday(&t1,NULL);
    mul_c(a,b);
    gettimeofday(&t2,NULL);
    time = 1000 * (t2.tv_sec - t1.tv_sec) + (t2.tv_usec - t1.tv_usec) / 1000;
    printf("mul_c: %llu ms\n",time);

    gettimeofday(&t1,NULL);
    mul_asm(b,c);
    gettimeofday(&t2,NULL);
    time = 1000 * (t2.tv_sec - t1.tv_sec) + (t2.tv_usec - t1.tv_usec) / 1000;
    printf("mul_asm: %llu ms\n\n",time);

    for (int i = 0; i != 1000000; ++i)
    {
        printf("%lf\t\t\t%lf\n",a[i],b[i]);
    }

    return 0;
}

I also tried to make use of all the xmm registers (0-7) and removed instruction dependencies to get better parallel computation:

void mul_asm(double* a,double* b)
{
    asm volatile
    (
        ".intel_Syntax noprefix                 \n\t"
        "xor    rax,rax                        \n\t"
        "0:                                     \n\t"
        "movupd xmm0,xmmword ptr [rdi+rax]     \n\t"
        "movupd xmm1,xmmword ptr [rdi+rax+16]  \n\t"
        "movupd xmm2,xmmword ptr [rdi+rax+32]  \n\t"
        "movupd xmm3,xmmword ptr [rdi+rax+48]  \n\t"
        "movupd xmm4,xmmword ptr [rdi+rax+64]  \n\t"
        "movupd xmm5,xmmword ptr [rdi+rax+80]  \n\t"
        "movupd xmm6,xmmword ptr [rdi+rax+96]  \n\t"
        "movupd xmm7,xmmword ptr [rdi+rax+112] \n\t"
        "mulpd  xmm0,xmmword ptr [rsi+rax]     \n\t"
        "mulpd  xmm1,xmmword ptr [rsi+rax+16]  \n\t"
        "mulpd  xmm2,xmmword ptr [rsi+rax+32]  \n\t"
        "mulpd  xmm3,xmmword ptr [rsi+rax+48]  \n\t"
        "mulpd  xmm4,xmmword ptr [rsi+rax+64]  \n\t"
        "mulpd  xmm5,xmmword ptr [rsi+rax+80]  \n\t"
        "mulpd  xmm6,xmmword ptr [rsi+rax+96]  \n\t"
        "mulpd  xmm7,xmmword ptr [rsi+rax+112] \n\t"
        "movupd xmmword ptr [rdi+rax],xmm0     \n\t"
        "movupd xmmword ptr [rdi+rax+16],xmm1  \n\t"
        "movupd xmmword ptr [rdi+rax+32],xmm2  \n\t"
        "movupd xmmword ptr [rdi+rax+48],xmm3  \n\t"
        "movupd xmmword ptr [rdi+rax+64],xmm4  \n\t"
        "movupd xmmword ptr [rdi+rax+80],xmm5  \n\t"
        "movupd xmmword ptr [rdi+rax+96],xmm6  \n\t"
        "movupd xmmword ptr [rdi+rax+112],xmm7 \n\t"
        "add    rax,128                        \n\t"
        "cmp    rax,8000000                    \n\t"
        "jne    0b                              \n\t"
        ".att_Syntax noprefix                   \n\t"

        : 
        : "D" (a),"cc"
    );
}

But it still runs at 1 ms, the same speed as the ordinary C/C++ implementation.

UPDATE

As the answers/comments suggested, I have implemented another way of measuring the execution time:

#include <stdio.h>
#include <stdlib.h>

void mul_c(double* a,double* b)
{
    for (int i = 0; i != 1000000; ++i)
    {
        a[i] = a[i] * b[i];
    }
}

// mul_asm and mul_asm2 are the inline-assembly implementations shown above
// (presumably mul_asm is the two-doubles-per-iteration version and mul_asm2
// the unrolled version using xmm0-xmm7).

unsigned long timestamp()
{
    unsigned long a;

    asm volatile
    (
        ".intel_Syntax noprefix \n\t"
        "xor   rax,rax         \n\t"
        "xor   rdx,rdx         \n\t"
        "RDTSCP                 \n\t"
        "shl   rdx,32          \n\t"
        "or    rax,rdx         \n\t"
        ".att_Syntax noprefix   \n\t"

        : "=a" (a)
        : 
        : "memory","cc"
    );

    return a;
}
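
// A minimal alternative sketch (assumption: GCC/Clang on x86-64, with
// <x86intrin.h> included): the same serializing timestamp can be read with
// the __rdtscp intrinsic instead of inline assembly. timestamp_intrin is a
// hypothetical name, not part of the original program.
unsigned long long timestamp_intrin()
{
    unsigned int aux;       // receives IA32_TSC_AUX (core id); unused here
    return __rdtscp(&aux);  // reads the time-stamp counter
}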

int main()
{
    unsigned long t1;
    unsigned long t2;

    double* a;
    double* b;

    a = (double*)malloc(sizeof(double) * 1000000);
    b = (double*)malloc(sizeof(double) * 1000000);

    for (int i = 0; i != 1000000; ++i)
    {
        double v;
        scanf("%lf",&v);
        a[i] = v;
        b[i] = v;
    }

    t1 = timestamp();
    mul_c(a,b);
    //mul_asm(a,b);
    //mul_asm2(a,b);
    t2 = timestamp();
    printf("mul_c: %lu cycles\n\n",t2 - t1);

    for (int i = 0; i != 1000000; ++i)
    {
        printf("%lf\t\t\t%lf\n",b[i]);
    }

    return 0;
}

When I run the program with this measurement, I get these results:

mul_c:    ~2163971628 cycles
mul_asm:  ~2532045184 cycles
mul_asm2: ~5230488    cycles <-- what???

There are two things worth noting. First, the cycle counts vary a lot, and I assume that is because the OS allows other processes to run in between. Is there any way to prevent that, or to count only the cycles spent executing my program? Also, mul_asm2 produces the same output as the other two, so why is it so much faster?

I also tried Z boson's program on my system, together with my 2 implementations, and got the following results:

> g++ -O2 -fopenmp main.cpp
> ./a.out
mul         time 1.33 s, 18.08 GB/s
mul_SSE     time 1.13 s, 21.24 GB/s
mul_SSE_NT  time 1.51 s, 15.88 GB/s
mul_SSE_OMP time 0.79 s, 30.28 GB/s
mul_SSE_v2  time 1.12 s, 21.49 GB/s
mul_v2      time 1.26 s, 18.99 GB/s
mul_asm     time 1.12 s, 21.50 GB/s
mul_asm2    time 1.09 s, 22.08 GB/s

Solution

My previous benchmark had a major bug in the timing function I used. This greatly underestimated the bandwidth of the non-vectorized version, as well as other measurements. Additionally, there was the problem of overestimating the bandwidth on an array that was read but not written, due to COW. Finally, the maximum bandwidth I used was incorrect. I have updated my answer with the corrections, and I have left my old answer at the end of this one.

Your operation is memory-bandwidth bound. This means the CPU spends most of its time waiting on slow memory reads and writes. An excellent explanation of this can be found here: Why vectorizing the loop does not have performance improvement.

However, I have to disagree with one statement in that answer:

So regardless of how it’s optimized,(vectorized,unrolled,etc…) it isn’t gonna get much faster.

In fact, vectorization, unrolling, and multiple threads can significantly increase the bandwidth even in memory-bandwidth-bound operations. The reason is that it is hard to reach the maximum memory bandwidth. A good explanation of this can be found here: https://stackoverflow.com/a/25187492/2542702.

The rest of my answer will show how vectorization and multiple threads can get closer to the maximum memory bandwidth.

My test system: Ubuntu 16.10, Skylake (i7-6700HQ @ 2.60 GHz), 32 GB RAM, dual-channel DDR4 @ 2400 MHz. The maximum bandwidth of my system is 38.4 GB/s.
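
For reference, that maximum follows directly from the memory configuration (a back-of-the-envelope check, assuming 64-bit transfers per channel):

2400 MT/s * 8 bytes/transfer * 2 channels = 38 400 MB/s ≈ 38.4 GB/s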

The following tables were generated from the code below. I set the number of threads with OMP_NUM_THREADS, e.g. export OMP_NUM_THREADS=4. Efficiency is bandwidth/max_bandwidth.

-O2 -march=native -fopenmp
Threads Efficiency
1       59.2%
2       76.6%
4       74.3%
8       70.7%

-O2 -march=native -fopenmp -funroll-loops
Threads Efficiency
1       55.8%
2       76.5%
4       72.1%
8       72.2%

-O3 -march=native -fopenmp
Threads Efficiency
1       63.9%
2       74.6%
4       63.9%
8       63.2%

-O3 -march=native -fopenmp -mprefer-avx128
Threads Efficiency
1       67.8%
2       76.0%
4       63.9%
8       63.2%

-O3 -march=native -fopenmp -mprefer-avx128 -funroll-loops
Threads Efficiency
1       68.8%
2       73.9%
4       69.0%
8       66.8%

Based on many runs and allowing for measurement uncertainty, I have come to the following conclusions:

> single-threaded scalar operations get more than 50% of the bandwidth.
> two-thread scalar operations get the highest bandwidth.
> single-threaded vector operations are faster than single-threaded scalar operations.
> single-threaded SSE operations are faster than single-threaded AVX operations.
> unrolling is not helpful.
> unrolling single-threaded operations is slower than not unrolling.
> more threads than cores (Hyper-Threading) gives lower bandwidth.

The solution that gives the best bandwidth is scalar operations with two threads.

The code I used for the benchmark:

#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <omp.h>

#define N 10000000
#define R 100

void mul(double *a,double *b) {
  #pragma omp parallel for
  for (int i = 0; i<N; i++) a[i] *= b[i];
}

int main() {
  double maxbw = 2.4*2*8; // 2.4GHz * 2-channels * 64-bits * 1-byte/8-bits 
  double mem = 3*sizeof(double)*N*R*1E-9; // GB of traffic: read a + read b + write a = 3 doubles per element

  double *a = (double*)malloc(sizeof *a * N);
  double *b = (double*)malloc(sizeof *b * N);

  //due to copy-on-write b must be initialized to get the correct bandwidth
  //also,GCC will convert malloc + memset(0) to calloc so use memset(1)
  memset(b,1,sizeof *b * N);

  double dtime = -omp_get_wtime();
  for(int i=0; i<R; i++) mul(a,b);
  dtime += omp_get_wtime();
  printf("%.2f s,%.1f GB/s,%.1f%%\n",dtime,mem/dtime,100*mem/dtime/maxbw);

  free(a),free(b);
}
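
Since the conclusion above is that two scalar threads give the best bandwidth, the thread count can also be pinned from inside the program rather than through OMP_NUM_THREADS. A minimal sketch using the standard omp_set_num_threads call; run_two_threads is a hypothetical helper, not part of the benchmark:

#include <omp.h>

void mul(double *a, double *b);          /* the kernel defined above */

void run_two_threads(double *a, double *b, int repeats)
{
    omp_set_num_threads(2);              /* overrides OMP_NUM_THREADS for later parallel regions */
    for (int i = 0; i < repeats; i++)
        mul(a, b);
}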

My old solution, which had the timing error

The modern replacement for inline assembly is intrinsics. There are still cases that require inline assembly, but this is not one of them.

An intrinsics solution for your inline-assembly approach is straightforward:

void mul_SSE(double*  a,double*  b) {
  for (int i = 0; i<N/2; i++) 
      _mm_store_pd(&a[2*i],_mm_mul_pd(_mm_load_pd(&a[2*i]),_mm_load_pd(&b[2*i])));
}
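
For comparison, the corresponding AVX version would process four doubles per iteration. A sketch, assuming N is a multiple of 4, the arrays are 32-byte aligned, and the code is built with -mavx; mul_AVX is a hypothetical name, not one of the functions benchmarked below:

#include <immintrin.h>

void mul_AVX(double*  a,double*  b) {
  for (int i = 0; i<N/4; i++)
      _mm256_store_pd(&a[4*i],_mm256_mul_pd(_mm256_load_pd(&a[4*i]),_mm256_load_pd(&b[4*i])));
}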

Let me define some test code:

#include <x86intrin.h>
#include <string.h>
#include <stdio.h>
#include <omp.h>

#define N 1000000
#define R 1000

typedef __attribute__(( aligned(32)))  double aligned_double;
void  (*fp)(aligned_double *a,aligned_double *b);

void mul(aligned_double* __restrict a,aligned_double* __restrict b) {
  for (int i = 0; i<N; i++) a[i] *= b[i];
}

void mul_SSE(double*  a,double*  b) {
  for (int i = 0; i<N/2; i++) _mm_store_pd(&a[2*i],_mm_mul_pd(_mm_load_pd(&a[2*i]),_mm_load_pd(&b[2*i])));
}

void mul_SSE_NT(double*  a,double*  b) {
  for (int i = 0; i<N/2; i++) _mm_stream_pd(&a[2*i],_mm_mul_pd(_mm_load_pd(&a[2*i]),_mm_load_pd(&b[2*i])));
}

void mul_SSE_OMP(double*  a,double*  b) {
  #pragma omp parallel for
  for (int i = 0; i<N; i++) a[i] *= b[i];
}

void test(aligned_double *a,aligned_double *b,const char *name) {
  double dtime;
  const double mem = 3*sizeof(double)*N*R/1024/1024/1024;
  const double maxbw = 34.1;
  dtime = -omp_get_wtime();
  for(int i=0; i<R; i++) fp(a,b);
  dtime += omp_get_wtime();
  printf("%s \t time %.2f s,efficency %.1f%%\n",name,100*mem/dtime/maxbw);
}

int main() {
  double *a = (double*)_mm_malloc(sizeof *a * N,32);
  double *b = (double*)_mm_malloc(sizeof *b * N,32);

  //b must be initialized to get the correct bandwidth!!!
  memset(a,1,sizeof *a * N);
  memset(b,1,sizeof *b * N);

  fp = mul,test(a,b,"mul        ");
  fp = mul_SSE,test(a,b,"mul_SSE    ");
  fp = mul_SSE_NT,test(a,b,"mul_SSE_NT ");
  fp = mul_SSE_OMP,test(a,b,"mul_SSE_OMP");

  _mm_free(a),_mm_free(b);
}

Now the first test:

g++ -O2 -fopenmp test.cpp
./a.out
mul              time 1.67 s,13.1 GB/s,efficiency 38.5%
mul_SSE          time 1.00 s,21.9 GB/s,efficiency 64.3%
mul_SSE_NT       time 1.05 s,20.9 GB/s,efficiency 61.4%
mul_SSE_OMP      time 0.74 s,29.7 GB/s,efficiency 87.0%

So with -O2, which does not vectorize the loop, we see that the intrinsic SSE version is considerably faster than the plain C solution. Efficiency = bandwidth_measured / max_bandwidth, where the maximum for my system is 34.1 GB/s.

The second test:

g++ -O3 -fopenmp test.cpp
./a.out
mul              time 1.05 s,efficiency 61.2%
mul_SSE          time 0.99 s,22.3 GB/s,efficiency 65.3%
mul_SSE_NT       time 1.01 s,21.7 GB/s,efficiency 63.7%
mul_SSE_OMP      time 0.68 s,32.5 GB/s,efficiency 95.2%

With -O3, which vectorizes the loop, the intrinsics offer essentially no advantage.

The third test:

g++ -O3 -fopenmp -funroll-loops test.cpp
./a.out
mul              time 0.85 s,25.9 GB/s,efficiency 76.1%
mul_SSE          time 0.84 s,26.2 GB/s,efficiency 76.7%
mul_SSE_NT       time 1.06 s,20.8 GB/s,efficiency 61.0%
mul_SSE_OMP      time 0.76 s,29.0 GB/s,efficiency 85.0%

With -funroll-loops GCC unrolls the loop eight times, and we see a significant improvement for everything except the non-temporal-store solution; the OpenMP solution also sees no real advantage.

Before unrolling the loop, the assembly for mul with -O3 is:

xor     eax,eax
.L2:
    movupd  xmm0,XMMWORD PTR [rsi+rax]
    mulpd   xmm0,XMMWORD PTR [rdi+rax]
    movaps  XMMWORD PTR [rdi+rax],xmm0
    add     rax,16
    cmp     rax,8000000
    jne     .L2
    rep ret

With -O3 -funroll-loops, the assembly for mul is:

xor     eax,eax
.L2:
    movupd  xmm0,XMMWORD PTR [rsi+rax]
    movupd  xmm1,XMMWORD PTR [rsi+16+rax]
    mulpd   xmm0,XMMWORD PTR [rdi+rax]
    movupd  xmm2,XMMWORD PTR [rsi+32+rax]
    mulpd   xmm1,XMMWORD PTR [rdi+16+rax]
    movupd  xmm3,XMMWORD PTR [rsi+48+rax]
    mulpd   xmm2,XMMWORD PTR [rdi+32+rax]
    movupd  xmm4,XMMWORD PTR [rsi+64+rax]
    mulpd   xmm3,XMMWORD PTR [rdi+48+rax]
    movupd  xmm5,XMMWORD PTR [rsi+80+rax]
    mulpd   xmm4,XMMWORD PTR [rdi+64+rax]
    movupd  xmm6,XMMWORD PTR [rsi+96+rax]
    mulpd   xmm5,XMMWORD PTR [rdi+80+rax]
    movupd  xmm7,XMMWORD PTR [rsi+112+rax]
    mulpd   xmm6,XMMWORD PTR [rdi+96+rax]
    movaps  XMMWORD PTR [rdi+rax],xmm0
    mulpd   xmm7,XMMWORD PTR [rdi+112+rax]
    movaps  XMMWORD PTR [rdi+16+rax],xmm1
    movaps  XMMWORD PTR [rdi+32+rax],xmm2
    movaps  XMMWORD PTR [rdi+48+rax],xmm3
    movaps  XMMWORD PTR [rdi+64+rax],xmm4
    movaps  XMMWORD PTR [rdi+80+rax],xmm5
    movaps  XMMWORD PTR [rdi+96+rax],xmm6
    movaps  XMMWORD PTR [rdi+112+rax],xmm7
    sub     rax,-128
    cmp     rax,8000000
    jne     .L2
    rep ret

The fourth test:

g++ -O3 -fopenmp -mavx test.cpp
./a.out
mul              time 0.87 s,25.3 GB/s,efficiency 74.3%
mul_SSE          time 0.88 s,24.9 GB/s,efficiency 73.0%
mul_SSE_NT       time 1.07 s,20.6 GB/s,efficiency 60.5%
mul_SSE_OMP      time 0.76 s,efficiency 85.2%

Now the non-intrinsic function is the fastest (excluding the OpenMP version).

So in this case there is no reason to use intrinsics or inline assembly, because we can get the best performance with the appropriate compiler options (e.g. -O3, -funroll-loops, -mavx).

Test system: Ubuntu 16.10, 32 GB RAM. Maximum memory bandwidth (34.1 GB/s): https://ark.intel.com/products/88967/Intel-Core-i7-6700HQ-Processor-6M-Cache-up-to-3_50-GHz

Here is another solution worth considering. The cmp instruction is not necessary if we count from -N up to zero and access the arrays as N+i. GCC should have fixed this long ago. It eliminates one instruction (though due to macro-op fusion, the cmp and jmp often count as a single micro-op).

void mul_SSE_v2(double*  a,double*  b) {
  for (ptrdiff_t i = -N; i<0; i+=2)
    _mm_store_pd(&a[N + i],_mm_mul_pd(_mm_load_pd(&a[N + i]),_mm_load_pd(&b[N + i])));
}

The assembly with -O3:

mul_SSE_v2(double*,double*):
    mov     rax,-1000000
.L9:
    movapd  xmm0,XMMWORD PTR [rdi+8000000+rax*8]
    mulpd   xmm0,XMMWORD PTR [rsi+8000000+rax*8]
    movaps  XMMWORD PTR [rdi+8000000+rax*8],xmm0
    add     rax,2
    jne     .L9
    rep ret

This optimization will likely only help when the arrays fit in, e.g., the L1 cache, i.e. when they are not being read from main memory.

I finally found a way to get a pure-C solution that does not generate the cmp instruction:

void mul_v2(aligned_double* __restrict a,aligned_double* __restrict b) {
  for (int i = -N; i<0; i++) a[i] *= b[i];
}

and then call the function from a separate object file as mul_v2(&a[N], &b[N]); this is probably the best solution. However, if you call the function from the same object file (translation unit) as the one it is defined in, GCC generates the cmp instruction again.
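
As a sketch of what the separate-translation-unit call looks like (hypothetical file split; mul_v2 is declared here and defined elsewhere, with N as defined above):

/* caller.c -- a different translation unit from the one defining mul_v2 */
extern void mul_v2(double* __restrict a, double* __restrict b);

void run(double *a, double *b) {
  mul_v2(&a[N], &b[N]);   /* pass the one-past-the-end pointers; mul_v2 indexes with negative i */
}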

Also,

void mul_v3(aligned_double* __restrict a,aligned_double* __restrict b) {
  for (int i = -N; i<0; i++) a[N+i] *= b[N+i];
}

still generates the cmp instruction and produces the same assembly as the mul function.

The mul_SSE_NT function is silly. It uses non-temporal stores, which are only useful when memory is only written; but since the function reads and writes the same addresses, non-temporal stores are not just useless, they give worse results.
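
Non-temporal stores do pay off in a purely write-only kernel, for example filling a large array that will not be read back soon. A minimal sketch; fill_NT is a hypothetical function, and _mm_stream_pd/_mm_sfence are standard SSE2 intrinsics:

#include <stddef.h>
#include <emmintrin.h>

void fill_NT(double *a, double v, size_t n) {  /* assumes a is 16-byte aligned and n is even */
  __m128d x = _mm_set1_pd(v);
  for (size_t i = 0; i < n; i += 2)
      _mm_stream_pd(&a[i], x);                 /* write-combining store, bypasses the cache */
  _mm_sfence();                                /* order the streamed stores before later reads */
}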

Previous versions of this answer reported the wrong bandwidth. The reason was arrays that were not initialized.

