2020-04-02 OpenMP

    int nthreads, tid;
    #pragma omp parallel private(nthreads, tid)
    {
        /* Obtain thread number */
        tid = omp_get_thread_num();
        printf("Hello World from thread = %d\n", tid);

        /* Only master thread does this */
        if (tid == 0)
        {
            nthreads = omp_get_num_threads();
            printf("Number of threads = %d\n", nthreads);
        }
    }  /* All threads join master thread and disband */
    int x = 2;
    #pragma omp parallel num_threads(2) shared(x)
    {
        if (omp_get_thread_num() == 0) {
            x = 5;
        } else {
            /* Unsynchronized read: may print x = 2 or x = 5 (data race with thread 0's write) */
            printf("1: Thread# %d: x = %d\n", omp_get_thread_num(), x);
        }
        #pragma omp barrier
        /* After the barrier, every thread sees x == 5 */
        if (omp_get_thread_num() == 0) {
            printf("2: Thread# %d: x = %d\n", omp_get_thread_num(), x);
        } else {
            printf("3: Thread# %d: x = %d\n", omp_get_thread_num(), x);
        }
    }
    int nthreads, tid, var3;
    var3 = 30;
    char buf[32];
    /* Fork a team of threads */
    #pragma omp parallel private(nthreads, tid) shared(var3)
    {
        tid = omp_get_thread_num();   /* Obtain and print thread id */
        printf("Hello, world from OpenMP thread %d, shared var=%d\n", tid, var3);
        if (tid == 0)   /* Only master thread does this */
        {
            nthreads = omp_get_num_threads();
            printf("Number of threads %d, shared var=%d\n", nthreads, var3);
        }
    }

#include <stdio.h>
#include <time.h>

static long num_steps = 10000000;
double step;
#define NUM_THREADS 4

/* Serial baseline: midpoint-rule integration of 4/(1+x^2) over [0,1] */
int main() {
    int i;
    double x, pi, sum = 0.0, start_time, end_time;
    step = 1.0 / (double) num_steps;
    start_time = clock();
    for (i = 1; i <= num_steps; i++) {
        x = (i - 0.5) * step;
        sum = sum + 4.0 / (1.0 + x * x);
    }
    pi = step * sum;
    end_time = clock();
    printf("Pi=%.10lf\nRunning time %lf seconds\n", pi, (end_time - start_time) / CLOCKS_PER_SEC);

    return 0;
}

#include <stdio.h>
#include <omp.h>

static long num_steps = 100000;
double step;
#define NUM_THREADS 4

/* Parallel version: each thread takes every NUM_THREADS-th step and keeps its partial sum in sum[id] */
int main() {
    int i;
    double pi, sum[NUM_THREADS], start_time, end_time;
    step = 1.0 / (double) num_steps;
    omp_set_num_threads(NUM_THREADS);
    start_time = omp_get_wtime();
    #pragma omp parallel
    {
        int i, id;   /* declared inside the region, so private to each thread */
        double x;
        id = omp_get_thread_num();
        for (i = id, sum[id] = 0.0; i < num_steps; i = i + NUM_THREADS) {
            x = (i + 0.5) * step;
            sum[id] += 4.0 / (1.0 + x * x);
        }
    }
    /* Accumulate the per-thread partial sums */
    for (i = 0, pi = 0.0; i < NUM_THREADS; i++)
        pi += sum[i] * step;
    end_time = omp_get_wtime();
    printf("Pi=%.10lf\nRunning time %lf seconds\n", pi, end_time - start_time);

    return 0;
}

#include <stdio.h>
#include <omp.h>

static long num_steps = 100000;   /* as in the previous example */
double step;
#define NUM_THREADS 4

/* Parallel version with a work-sharing loop: #pragma omp for splits the iterations among the threads */
int main() {
    int i, id;
    double start_time, end_time;
    double x, pi, sum[NUM_THREADS];
    step = 1.0 / (double) num_steps;
    omp_set_num_threads(NUM_THREADS);
    start_time = omp_get_wtime();
    #pragma omp parallel private(x, id)
    {
        id = omp_get_thread_num(); sum[id] = 0;
        #pragma omp for
        for (i = 0; i < num_steps; i++) {
            x = (i + 0.5) * step;
            sum[id] += 4.0 / (1.0 + x * x);
        }
    }
    /* Accumulate the per-thread partial sums */
    for (i = 0, pi = 0.0; i < NUM_THREADS; i++)
        pi += sum[i] * step;
    end_time = omp_get_wtime();
    printf("Pi=%.10lf\nRunning time %lf seconds\n", pi, end_time - start_time);
    return 0;
}

To avoid data races, declare per-thread variables as private.
Local variables declared inside a parallel region are private to each thread.
In the parallel computation of Pi, each thread id accumulates into its own slot of the sum array, and the partial sums are added together at the end.
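
As a minimal sketch of the first two points (the variable names are illustrative, not taken from the code above): a variable declared before the parallel region is shared by default, so unsynchronized concurrent updates to it race, while a variable declared inside the region is private to each thread.

    #include <stdio.h>
    #include <omp.h>

    int main() {
        int shared_counter = 0;             /* declared outside the region: shared by default */
        #pragma omp parallel num_threads(4)
        {
            int local_counter = 0;          /* declared inside the region: private to each thread */
            local_counter++;                /* no race: every thread increments its own copy */

            #pragma omp atomic
            shared_counter++;               /* atomic update avoids the data race on the shared variable */

            printf("Thread %d: local_counter = %d\n", omp_get_thread_num(), local_counter);
        }
        printf("shared_counter = %d\n", shared_counter);   /* 4, because the updates were atomic */
        return 0;
    }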
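
The per-thread sum array in the Pi examples can also be expressed with OpenMP's reduction clause, which gives every thread a private partial sum and combines the copies automatically at the end of the loop. The sketch below is an alternative formulation, not part of the original code; the constants mirror the examples above.

    #include <stdio.h>
    #include <omp.h>

    #define NUM_THREADS 4
    static long num_steps = 100000;

    int main() {
        double step = 1.0 / (double) num_steps;
        double sum = 0.0;

        omp_set_num_threads(NUM_THREADS);
        double start_time = omp_get_wtime();

        /* reduction(+:sum) gives each thread a private copy of sum and
           adds the per-thread copies together when the loop finishes */
        #pragma omp parallel for reduction(+:sum)
        for (long i = 0; i < num_steps; i++) {
            double x = (i + 0.5) * step;
            sum += 4.0 / (1.0 + x * x);
        }

        double pi = step * sum;
        double end_time = omp_get_wtime();
        printf("Pi=%.10lf\nRunning time %lf seconds\n", pi, end_time - start_time);
        return 0;
    }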
