Double free or corruption when running multithreaded

I am getting the runtime error "double free or corruption" in a C++ program that calls a reliable library, ANN, and uses OpenMP to parallelize a for loop:

*** glibc detected *** /home/tim/test/debug/test: double free or corruption (!prev): 0x0000000002527260 ***

Does this mean that the memory at address 0x0000000002527260 was freed more than once?

The error occurs at "_search_struct->annkSearch(queryPt, k_max, nnIdx, dists, _eps);" inside the function classify_various_k(), which in turn is called from the OpenMP for loop inside the function tune_complexity().

Note that the error only occurs when OpenMP runs with more than one thread; it does not happen in the single-threaded case. I don't know why.

My code is below. If it isn't enough to diagnose the problem, just let me know. Thanks for your help.

void KNNClassifier::train(int nb_examples, int dim, double **features, int *labels) {
  _nPts = nb_examples;

  _labels = labels;
  _dataPts = features;

  setting_ANN(_dist_type, 1);

  delete _search_struct;
  if(strcmp(_search_neighbors, "brutal") == 0) {
    _search_struct = new ANNbruteForce(_dataPts, _nPts, dim);
  }else if(strcmp(_search_neighbors, "kdtree") == 0) {
    _search_struct = new ANNkd_tree(_dataPts, _nPts, dim);
  }
}


void KNNClassifier::classify_various_k(int dim, double *feature, int label, int *ks, double *errors, int nb_ks, int k_max) {
  ANNpoint      queryPt = 0;
  ANNidxArray   nnIdx = 0;
  ANNdistArray  dists = 0;

  queryPt = feature;
  nnIdx = new ANNidx[k_max];
  dists = new ANNdist[k_max];

  if(strcmp(_search_neighbors, "brutal") == 0) {
    _search_struct->annkSearch(queryPt, k_max, nnIdx, dists, _eps);
  }else if(strcmp(_search_neighbors, "kdtree") == 0) {
    _search_struct->annkSearch(queryPt, k_max, nnIdx, dists, _eps); // where error occurs
  }

  for (int j = 0; j < nb_ks; j++)
  {
    scalar_t result = 0.0;
    for (int i = 0; i < ks[j]; i++) {
      result += _labels[ nnIdx[i] ];
    }
    if (result*label < 0) errors[j]++;
  }

  delete [] nnIdx;
  delete [] dists;
}

void KNNClassifier::tune_complexity(int nb_examples, int dim, double **features, int *labels, int fold, char *method, int nb_examples_test, double **features_test, int *labels_test) {
  int nb_try = (_k_max - _k_min) / scalar_t(_k_step);
  scalar_t *error_validation = new scalar_t [nb_try];
  int *ks = new int [nb_try];

  for(int i=0; i < nb_try; i++){
    ks[i] = _k_min + _k_step * i;
  }

  if (strcmp(method, "ct") == 0)
  {
    train(nb_examples, dim, features, labels); // train once for all nb of nbs in ks

    for(int i=0; i < nb_try; i++){
      if (ks[i] > nb_examples){ nb_try = i; break; }
      error_validation[i] = 0;
    }

    int i = 0;
#pragma omp parallel shared(nb_examples_test, error_validation, features_test, labels_test, nb_try, ks) private(i)
    {
#pragma omp for schedule(dynamic) nowait
      for (i=0; i < nb_examples_test; i++)
      {
        classify_various_k(dim, features_test[i], labels_test[i], ks, error_validation, nb_try, ks[nb_try - 1]); // where error occurs
      }
    }
    for (i=0; i < nb_try; i++)
    {
      error_validation[i] /= nb_examples_test;
    }
  }

  ......
}

Update:

Thanks! I am now trying to fix the conflicting writes to the same memory in classify_various_k() by using "#pragma omp critical":

void KNNClassifier::classify_various_k(int dim, double *feature, int label, int *ks, double *errors, int nb_ks, int k_max) {
  ANNpoint      queryPt = 0;
  ANNidxArray   nnIdx = 0;
  ANNdistArray  dists = 0;

  queryPt = feature;
  nnIdx = new ANNidx[k_max];
  dists = new ANNdist[k_max];

  if(strcmp(_search_neighbors, "brutal") == 0) {    // search
    _search_struct->annkSearch(queryPt, k_max, nnIdx, dists, _eps);
  }else if(strcmp(_search_neighbors, "kdtree") == 0) {
    _search_struct->annkSearch(queryPt, k_max, nnIdx, dists, _eps);
  }

  for (int j = 0; j < nb_ks; j++)
  {
    scalar_t result = 0.0;
    for (int i = 0; i < ks[j]; i++) {
      result += _labels[ nnIdx[i] ];
    }
    if (result*label < 0)
    {
#pragma omp critical
      {
        errors[j]++;
      }
    }
  }

  delete [] nnIdx;
  delete [] dists;
}

Your train method deletes _search_struct before allocating new memory, so the very first time train is called it deletes whatever the pointer happens to hold. Is there any code that allocates it before train is called? If not, you may be trying to delete garbage memory (although without that code we can't tell).
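If nothing initializes that pointer before the first train() call, one fix is to make sure it starts out as NULL, since deleting a null pointer is a no-op. A minimal sketch; the constructor below is hypothetical and not from the posted code:

// Hypothetical constructor -- not from the posted code.
// Starting the pointer at NULL makes the first `delete _search_struct;`
// inside train() a harmless no-op.
KNNClassifier::KNNClassifier()
  : _search_struct(NULL)
{
}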

I don't know whether this is your problem, but:

void KNNClassifier::train(int nb_examples, int dim, double **features, int * labels) {
  ...
  delete _search_struct;
  if(strcmp(_search_neighbors, "brutal") == 0) {
    _search_struct = new ANNbruteForce(_dataPts, _nPts, dim);
  }else if(strcmp(_search_neighbors, "kdtree") == 0) {  
    _search_struct = new ANNkd_tree(_dataPts, _nPts, dim);
  }
}  
What happens if you don't fall into either the if or the else if clause? You've deleted _search_struct and left it pointing at garbage. You should set it to NULL afterwards.
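A minimal sketch of that change (the same train() shown above, with only the NULL assignment added):

delete _search_struct;
_search_struct = NULL;  // no dangling pointer if neither branch below runs
if(strcmp(_search_neighbors, "brutal") == 0) {
  _search_struct = new ANNbruteForce(_dataPts, _nPts, dim);
}else if(strcmp(_search_neighbors, "kdtree") == 0) {
  _search_struct = new ANNkd_tree(_dataPts, _nPts, dim);
}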

If that's not the problem, you could try replacing every:

delete p;

with:

assert(p != NULL);
delete p;
p = NULL;

(or similarly for the delete [] sites). (However, this may cause problems on the first call to KNNClassifier::train.)


Also, obligatory question: do you really need to do all of this manual allocation and deallocation? Why aren't you at least using std::vector instead of new [] / delete [] (which is almost always bad)?
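For example, the two scratch buffers in classify_various_k() could be std::vectors. This is only a sketch; it assumes ANNidxArray and ANNdistArray are plain ANNidx* / ANNdist* pointers (as in ANN's headers), so the address of the first element can be passed to annkSearch:

#include <vector>

// Inside classify_various_k(): the vectors free themselves, even on early return.
std::vector<ANNidx>  nnIdx(k_max);
std::vector<ANNdist> dists(k_max);

_search_struct->annkSearch(queryPt, k_max, &nnIdx[0], &dists[0], _eps);
// ... use nnIdx[i] exactly as before; the delete [] calls at the end go away.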

OK, since you've stated that it works fine in the single-threaded case, the usual methods won't get you anywhere. You need to do the following:

  • find all the variables that are accessed in parallel
  • in particular, look at the ones that are modified

error_validation is made visible to every thread via

#pragma omp parallel shared(nb_examples_test, error_validation, features_test, labels_test, nb_try, ks) private(i)

and it is the errors argument that every thread writes to inside classify_various_k():

      for (int i = 0; i < ks[j]; i++) {
         result += _labels[ nnIdx[i] ];
      }
      if (result*label < 0) errors[j]++;
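One way to make that shared update safe, shown here only as an illustration (an alternative to the #pragma omp critical used in the update above, not something prescribed by the answers), is to protect just the increment:

// Sketch: serialize only the conflicting write to the shared errors array.
if (result * label < 0) {
#pragma omp atomic
  errors[j]++;   // errors aliases error_validation, which all threads share
}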