This is an automated email from the git hooks/post-receive script.

sebastic-guest pushed a commit to branch upstream-master
in repository pktools.
commit d5c90b1a3b2dab6e91a2756c65e8eff02b9f9634 Author: Pieter Kempeneers <kempe...@gmail.com> Date: Sun Nov 24 11:07:28 2013 +0100 floating point for accuracy in pkdiff.cc --- ChangeLog | 10 +++-- src/apps/pkclassify_nn.cc | 26 ++++++++++--- src/apps/pkclassify_svm.cc | 9 ++--- src/apps/pkdiff.cc | 91 ++++++++++++++++++++++++--------------------- src/apps/pkfs_nn.cc | 48 +++++++++++++++++++----- src/apps/pkfs_svm.cc | 2 +- src/apps/pkregression_nn.cc | 21 +++++++++-- 7 files changed, 137 insertions(+), 70 deletions(-) diff --git a/ChangeLog b/ChangeLog index ae3af38..c5ac434 100755 --- a/ChangeLog +++ b/ChangeLog @@ -119,14 +119,16 @@ version 2.4.1 version 2.4.2 - general removed using namespace std from header files - - PosValue.h - remove using namespace std; + - clean up of using namespace std in header files - apps/Makefile.am add GSL_LIBS to AM_LDFLAGS and LDADD todo: remove redundancy in AM_LDFLAGS and LDADD - - clean up of using namespace std in header files + - PosValue.h + remove using namespace std; - FileReaderAscii corrected error for constructor with fieldseparator argument + - pkdiff + introduced short option -m for mask - pkinfo bug fixed with -min and -max in hist (thanks to Giuseppe Amatuli for noticing) - pkfilter @@ -145,10 +147,12 @@ version 2.4.2 - pkclassify_nn.h support reading ogr files with single feature (one band only: "B" or "Band") - pkclassify_nn.cc + changed create_sparse to create_spare_array due to error in FANN... option -n for number of neurons cross validation parameter can not be 1 - pkfs_nn.cc cross validation parameter can not be 1 + solved some bugs - pkclassify_svm.cc cross validation parameter can not be 1 - pkopt_svm.cc diff --git a/src/apps/pkclassify_nn.cc b/src/apps/pkclassify_nn.cc index e3091f0..4946700 100644 --- a/src/apps/pkclassify_nn.cc +++ b/src/apps/pkclassify_nn.cc @@ -417,19 +417,35 @@ int main(int argc, char *argv[]) for(int ilayer=0;ilayer<nneuron_opt.size();++ilayer) cout << nneuron_opt[ilayer] << " "; cout << "neurons" << endl; - //test cout << "connection_opt[0]: " << connection_opt[0] << std::endl; cout << "num_layers: " << num_layers << std::endl; cout << "nFeatures: " << nFeatures << std::endl; cout << "nneuron_opt[0]: " << nneuron_opt[0] << std::endl; + cout << "number of classes (nclass): " << nclass << std::endl; } switch(num_layers){ - case(3): - net[ibag].create_sparse(connection_opt[0],num_layers, nFeatures, nneuron_opt[0], nclass); + case(3):{ + // net[ibag].create_sparse(connection_opt[0],num_layers, nFeatures, nneuron_opt[0], nclass);//replace all create_sparse with create_sparse_array due to bug in FANN! + unsigned int layers[3]; + layers[0]=nFeatures; + layers[1]=nneuron_opt[0]; + layers[2]=nclass; + net[ibag].create_sparse_array(connection_opt[0],num_layers,layers); break; - case(4): - net[ibag].create_sparse(connection_opt[0],num_layers, nFeatures, nneuron_opt[0], nneuron_opt[1], nclass); + } + case(4):{ + unsigned int layers[4]; + layers[0]=nFeatures; + layers[1]=nneuron_opt[0]; + layers[2]=nneuron_opt[1]; + layers[3]=nclass; + // layers.push_back(nFeatures); + // for(int ihidden=0;ihidden<nneuron_opt.size();++ihidden) + // layers.push_back(nneuron_opt[ihidden]); + // layers.push_back(nclass); + net[ibag].create_sparse_array(connection_opt[0],num_layers,layers); break; + } default: cerr << "Only 1 or 2 hidden layers are supported!" 
<< endl; exit(1); diff --git a/src/apps/pkclassify_svm.cc b/src/apps/pkclassify_svm.cc index ea68208..2205a2b 100644 --- a/src/apps/pkclassify_svm.cc +++ b/src/apps/pkclassify_svm.cc @@ -541,8 +541,8 @@ int main(int argc, char *argv[]) cout << cm.getClass(iclass) << " " << cm.nReference(cm.getClass(iclass)) << " " << dua << " (" << se95_ua << ")" << " " << dpa << " (" << se95_pa << ")" << endl; } std::cout << "Kappa: " << cm.kappa() << std::endl; - doa=cm.oa_pct(&se95_oa); - std::cout << "Overall Accuracy: " << doa << " (" << se95_oa << ")" << std::endl; + doa=cm.oa(&se95_oa); + std::cout << "Overall Accuracy: " << 100*doa << " (" << 100*se95_oa << ")" << std::endl; } //--------------------------------- end of training ----------------------------------- @@ -732,7 +732,6 @@ int main(int argc, char *argv[]) exit(3); } } - //process per pixel for(int icol=0;icol<ncol;++icol){ assert(hpixel[icol].size()==nband); @@ -1119,8 +1118,8 @@ int main(int argc, char *argv[]) cout << cm.getClass(iclass) << " " << cm.nReference(cm.getClass(iclass)) << " " << dua << " (" << se95_ua << ")" << " " << dpa << " (" << se95_pa << ")" << endl; } std::cout << "Kappa: " << cm.kappa() << std::endl; - doa=cm.oa_pct(&se95_oa); - std::cout << "Overall Accuracy: " << doa << " (" << se95_oa << ")" << std::endl; + doa=cm.oa(&se95_oa); + std::cout << "Overall Accuracy: " << 100*doa << " (" << 100*se95_oa << ")" << std::endl; } } try{ diff --git a/src/apps/pkdiff.cc b/src/apps/pkdiff.cc index 7a123d8..e8c7036 100644 --- a/src/apps/pkdiff.cc +++ b/src/apps/pkdiff.cc @@ -27,16 +27,17 @@ along with pktools. If not, see <http://www.gnu.org/licenses/>. int main(int argc, char *argv[]) { - Optionpk<string> input_opt("i", "input", "Input image file.", ""); - Optionpk<string> reference_opt("r", "reference", "Reference image file", ""); - Optionpk<string> output_opt("o", "output", "Output image file. Default is empty: no output image, only report difference or identical.", ""); - Optionpk<string> mask_opt("\0", "mask", "Mask image file. A single mask is supported only, but several mask values can be used. See also mflag option. (default is empty)", ""); - Optionpk<string> colorTable_opt("\0", "ct", "color table (file with 5 columns: id R G B ALFA (0: transparent, 255: solid)", ""); + Optionpk<string> input_opt("i", "input", "Input image file."); + Optionpk<string> reference_opt("r", "reference", "Reference image file"); + Optionpk<string> output_opt("o", "output", "Output image file. Default is empty: no output image, only report difference or identical."); + Optionpk<string> mask_opt("m", "mask", "Mask image file. A single mask is supported only, but several mask values can be used. See also mflag option. (default is empty)"); + Optionpk<string> colorTable_opt("ct", "ct", "color table (file with 5 columns: id R G B ALFA (0: transparent, 255: solid)", ""); Optionpk<short> valueE_opt("\0", "correct", "Value for correct pixels (0)", 0); Optionpk<short> valueO_opt("\0", "omission", "Value for omission errors: input label > reference label (default value is 1)", 1); Optionpk<short> valueC_opt("\0", "commission", "Value for commission errors: input label < reference label (default value is 2)", 2); Optionpk<short> flag_opt("f", "flag", "No value flag(s)", 0); - Optionpk<short> mflag_opt("m", "mflag", "Mask value(s) for invalid data (positive value), or for valid data (negative value). Default is 0", 0); + Optionpk<int> invalid_opt("t", "invalid", "Mask value(s) where image is invalid. 
Use negative value for valid data (example: use -t -1: if only -1 is valid value)", 0); + // Optionpk<short> mflag_opt("t", "mflag", "Mask value(s) for invalid data (positive value), or for valid data (negative value). Default is 0", 0); Optionpk<short> band_opt("b", "band", "Band to extract (0)", 0); Optionpk<bool> confusion_opt("cm", "confusion", "create confusion matrix (to std out) (default value is 0)", false); Optionpk<short> lzw_opt("\0", "lzw", "compression (default value is 1)", 1); @@ -63,7 +64,7 @@ int main(int argc, char *argv[]) valueO_opt.retrieveOption(argc,argv); valueC_opt.retrieveOption(argc,argv); flag_opt.retrieveOption(argc,argv); - mflag_opt.retrieveOption(argc,argv); + invalid_opt.retrieveOption(argc,argv); band_opt.retrieveOption(argc,argv); confusion_opt.retrieveOption(argc,argv); lzw_opt.retrieveOption(argc,argv); @@ -95,8 +96,12 @@ int main(int argc, char *argv[]) cout << " " << flag_opt[iflag]; cout << endl; } - if(mask_opt[0]!="") - assert(mask_opt.size()==input_opt.size()); + + assert(input_opt.size()); + assert(reference_opt.size()); + if(mask_opt.size()) + while(mask_opt.size()<input_opt.size()) + mask_opt.push_back(mask_opt[0]); vector<short> inputRange; vector<short> referenceRange; ConfusionMatrix cm; @@ -185,7 +190,7 @@ int main(int argc, char *argv[]) pfnProgress(progress,pszMessage,pProgressArg); if(reference_opt[0].find(".shp")!=string::npos){ for(int iinput=0;iinput<input_opt.size();++iinput){ - if(output_opt[0]!="") + if(output_opt.size()) assert(reference_opt.size()==output_opt.size()); for(int iref=0;iref<reference_opt.size();++iref){ if(verbose_opt[0]) @@ -194,7 +199,7 @@ int main(int argc, char *argv[]) ImgReaderOgr referenceReader; try{ inputReader.open(input_opt[iinput]);//,imagicX_opt[0],imagicY_opt[0]); - if(mask_opt[0]!=""){ + if(mask_opt.size()){ maskReader.open(mask_opt[iinput]); assert(inputReader.nrOfCol()==maskReader.nrOfCol()); assert(inputReader.nrOfRow()==maskReader.nrOfRow()); @@ -210,7 +215,7 @@ int main(int argc, char *argv[]) ImgWriterOgr ogrWriter; OGRLayer *writeLayer; - if(output_opt[0]!=""){ + if(output_opt.size()){ if(verbose_opt[0]) cout << "creating output vector file " << output_opt[0] << endl; assert(output_opt[0].find(".shp")!=string::npos); @@ -322,7 +327,7 @@ int main(int argc, char *argv[]) if(static_cast<int>(i_centre)<0||static_cast<int>(i_centre)>=inputReader.nrOfCol()) continue; OGRFeature *writeFeature; - if(output_opt[0]!=""){ + if(output_opt.size()){ writeFeature = OGRFeature::CreateFeature(writeLayer->GetLayerDefn()); if(verbose_opt[0]) cout << "copying fields from " << reference_opt[0] << endl; @@ -359,18 +364,18 @@ int main(int argc, char *argv[]) break; } } - maskFlagged=false;//(mflag_opt[ivalue]>=0)?false:true; - if(mask_opt[0]!=""){ + maskFlagged=false;//(invalid_opt[ivalue]>=0)?false:true; + if(mask_opt.size()){ maskReader.readData(maskValue,GDT_Int16,i,j,band_opt[0]); - for(int ivalue=0;ivalue<mflag_opt.size();++ivalue){ - if(mflag_opt[ivalue]>=0){//values set in mflag_opt are invalid - if(maskValue==mflag_opt[ivalue]){ + for(int ivalue=0;ivalue<invalid_opt.size();++ivalue){ + if(invalid_opt[ivalue]>=0){//values set in invalid_opt are invalid + if(maskValue==invalid_opt[ivalue]){ maskFlagged=true; break; } } - else{//only values set in mflag_opt are valid - if(maskValue!=-mflag_opt[ivalue]) + else{//only values set in invalid_opt are valid + if(maskValue!=-invalid_opt[ivalue]) maskFlagged=true; else{ maskFlagged=false; @@ -393,7 +398,7 @@ int main(int argc, char *argv[]) //flag if not all pixels are 
homogeneous or if at least one pixel flagged if(!windowHasFlag&&isHomogeneous){ - if(output_opt[0]!="") + if(output_opt.size()) writeFeature->SetField(labelclass_opt[0].c_str(),static_cast<int>(inputValue)); if(confusion_opt[0]){ ++ntotalValidation; @@ -445,7 +450,7 @@ int main(int argc, char *argv[]) fs << labelclass_opt[0] << "_" << windowJ << "_" << windowI; else fs << labelclass_opt[0]; - if(output_opt[0]!="") + if(output_opt.size()) writeFeature->SetField(fs.str().c_str(),static_cast<int>(inputValue)); if(!windowJ&&!windowI){//centre pixel if(confusion_opt[0]){ @@ -486,7 +491,7 @@ int main(int argc, char *argv[]) } } } - if(output_opt[0]!=""){ + if(output_opt.size()){ if(!windowAllFlagged){ if(verbose_opt[0]) cout << "creating feature" << endl; @@ -498,11 +503,11 @@ int main(int argc, char *argv[]) OGRFeature::DestroyFeature( writeFeature ); } } - if(output_opt[0]!="") + if(output_opt.size()) ogrWriter.close(); referenceReader.close(); inputReader.close(); - if(mask_opt[0]!="") + if(mask_opt.size()) maskReader.close(); } } @@ -511,9 +516,9 @@ int main(int argc, char *argv[]) ImgWriterGdal imgWriter; try{ inputReader.open(input_opt[0]);//,imagicX_opt[0],imagicY_opt[0]); - if(mask_opt[0]!="") + if(mask_opt.size()) maskReader.open(mask_opt[0]); - if(output_opt[0]!=""){ + if(output_opt.size()){ if(verbose_opt[0]) cout << "opening output image " << output_opt[0] << endl; string compression=(lzw_opt[0])? "LZW":"NONE"; @@ -547,7 +552,7 @@ int main(int argc, char *argv[]) vector<short> lineInput(inputReader.nrOfCol()); vector<short> lineMask(maskReader.nrOfCol()); vector<short> lineOutput; - if(output_opt[0]!="") + if(output_opt.size()) lineOutput.resize(inputReader.nrOfCol()); int irow=0; @@ -581,7 +586,7 @@ int main(int argc, char *argv[]) cout << referenceRange[rc] << endl; } if(referenceRange.size()!=inputRange.size()){ - if(confusion_opt[0]||output_opt[0]!=""){ + if(confusion_opt[0]||output_opt.size()){ cout << "reference range is not equal to input range!" 
<< endl; cout << "Kappa: " << 0 << endl; cout << "total weighted: " << 0 << endl; @@ -595,8 +600,8 @@ int main(int argc, char *argv[]) for(irow=0;irow<inputReader.nrOfRow()&&!isDifferent;++irow){ //read line in lineInput, lineReference and lineMask inputReader.readData(lineInput,GDT_Int16,irow,band_opt[0]); - if(mask_opt[0]!="") - maskReader.readData(lineMask,GDT_Int16,irow,band_opt[0]); + if(mask_opt.size()) + maskReader.readData(lineMask,GDT_Int16,irow); double x,y;//geo coordinates double ireference,jreference;//image coordinates in reference image for(icol=0;icol<inputReader.nrOfCol();++icol){ @@ -624,15 +629,15 @@ int main(int argc, char *argv[]) bool flagged=false; for(int iflag=0;iflag<flag_opt.size();++iflag){ if((lineInput[icol]==flag_opt[iflag])||(lineReference[ireference]==flag_opt[iflag])){ - if(output_opt[0]!="") + if(output_opt.size()) lineOutput[icol]=flag_opt[iflag]; flagged=true; break; } } - if(mask_opt[0]!=""){ - for(int ivalue=0;ivalue<mflag_opt.size();++ivalue){ - if(lineMask[icol]==mflag_opt[ivalue]){ + if(mask_opt.size()){ + for(int ivalue=0;ivalue<invalid_opt.size();++ivalue){ + if(lineMask[icol]==invalid_opt[ivalue]){ flagged=true; break; } @@ -652,7 +657,7 @@ int main(int argc, char *argv[]) cm.incrementResult(cm.getClass(rc),cm.getClass(ic),1); } if(lineInput[icol]==lineReference[ireference]){//correct - if(output_opt[0]!=""){ + if(output_opt.size()){ if(valueE_opt[0]!=flag_opt[0]) lineOutput[icol]=valueE_opt[0]; else @@ -664,7 +669,7 @@ int main(int argc, char *argv[]) isDifferent=true; break; } - if(output_opt[0]!=""){ + if(output_opt.size()){ if(lineInput[icol]<20){//forest if(lineReference[icol]>=20)//gain lineOutput[icol]=lineInput[icol]*10+1;//GAIN is 111,121,131 @@ -685,11 +690,11 @@ int main(int argc, char *argv[]) } else{ ++nflagged; - if(output_opt[0]!="") + if(output_opt.size()) lineOutput[icol]=flag_opt[0]; } } - if(output_opt[0]!=""){ + if(output_opt.size()){ try{ imgWriter.writeData(lineOutput,GDT_Int16,irow); } @@ -709,7 +714,7 @@ int main(int argc, char *argv[]) if(!verbose_opt[0]) pfnProgress(progress,pszMessage,pProgressArg); } - if(output_opt[0]!="") + if(output_opt.size()) imgWriter.close(); else if(!confusion_opt[0]){ if(isDifferent) @@ -719,7 +724,7 @@ int main(int argc, char *argv[]) } referenceReader.close(); inputReader.close(); - if(mask_opt[0]!="") + if(mask_opt.size()) maskReader.close(); } @@ -796,8 +801,8 @@ int main(int argc, char *argv[]) dpa=cm.pa_pct(classNames[iclass],&se95_pa); cout << cm.getClass(iclass) << " " << cm.nReference(cm.getClass(iclass)) << " " << dua << " (" << se95_ua << ")" << " " << dpa << " (" << se95_pa << ")" << endl; } - doa=cm.oa_pct(&se95_oa); + doa=cm.oa(&se95_oa); cout << "Kappa: " << cm.kappa() << endl; - cout << "Overall Accuracy: " << doa << " (" << se95_oa << ")" << endl; + cout << "Overall Accuracy: " << 100*doa << " (" << 100*se95_oa << ")" << endl; } } diff --git a/src/apps/pkfs_nn.cc b/src/apps/pkfs_nn.cc index 2fbca98..53928ff 100644 --- a/src/apps/pkfs_nn.cc +++ b/src/apps/pkfs_nn.cc @@ -82,12 +82,25 @@ double getCost(const vector<Vector2d<float> > &trainingFeatures) cout << "neurons" << endl; } switch(num_layers){ - case(3): - net.create_sparse(connection_opt[0],num_layers, nFeatures, nneuron_opt[0], nclass); + case(3):{ + unsigned int layers[3]; + layers[0]=nFeatures; + layers[1]=nneuron_opt[0]; + layers[2]=nclass; + net.create_sparse_array(connection_opt[0],num_layers,layers); + // net.create_sparse(connection_opt[0],num_layers, nFeatures, nneuron_opt[0], nclass); break; - case(4): - 
net.create_sparse(connection_opt[0],num_layers, nFeatures, nneuron_opt[0], nneuron_opt[1], nclass); + } + case(4):{ + unsigned int layers[4]; + layers[0]=nFeatures; + layers[1]=nneuron_opt[0]; + layers[2]=nneuron_opt[1]; + layers[3]=nclass; + net.create_sparse_array(connection_opt[0],num_layers,layers); + // net.create_sparse(connection_opt[0],num_layers, nFeatures, nneuron_opt[0], nneuron_opt[1], nclass); break; + } default: cerr << "Only 1 or 2 hidden layers are supported!" << endl; exit(1); @@ -95,7 +108,6 @@ double getCost(const vector<Vector2d<float> > &trainingFeatures) } net.set_learning_rate(learning_opt[0]); - // net.set_activation_steepness_hidden(1.0); // net.set_activation_steepness_output(1.0); @@ -116,8 +128,9 @@ double getCost(const vector<Vector2d<float> > &trainingFeatures) else if(cm.getClassIndex(type2string<short>(classValueMap[nameVector[iname]]))<0) cm.pushBackClassName(type2string<short>(classValueMap[nameVector[iname]])); } - vector<Vector2d<float> > tmpFeatures; + vector<Vector2d<float> > tmpFeatures(nclass); for(int iclass=0;iclass<nclass;++iclass){ + tmpFeatures[iclass].resize(trainingFeatures[iclass].size(),nFeatures); for(unsigned int isample=0;isample<nctraining[iclass];++isample){ for(int ifeature=0;ifeature<nFeatures;++ifeature){ tmpFeatures[iclass][isample][ifeature]=trainingFeatures[iclass][isample][ifeature]; @@ -142,8 +155,14 @@ double getCost(const vector<Vector2d<float> > &trainingFeatures) cm.incrementResult(cm.getClass(referenceVector[isample]),cm.getClass(outputVector[isample]),1.0); } } - else{ + else{//not working yet. please repair... + assert(cv_opt[0]>0); bool initWeights=true; + //test + cout << "tempFeatures.size(): " << tmpFeatures.size() << endl; + cout << "ntraining: " << ntraining << endl; + cout << "initWeights: " << initWeights << endl; + cout << "maxit_opt.size(): " << maxit_opt.size() << endl; net.train_on_data(tmpFeatures,ntraining,initWeights, maxit_opt[0], iterations_between_reports, desired_error); vector<Vector2d<float> > testFeatures(nclass); @@ -155,6 +174,8 @@ double getCost(const vector<Vector2d<float> > &trainingFeatures) for(int ifeature=0;ifeature<nFeatures;++ifeature){ testFeatures[iclass][isample][ifeature]=trainingFeatures[iclass][nctraining[iclass]+isample][ifeature]; } + //test + cout << "isample:" << isample<< endl; result=net.run(testFeatures[iclass][isample]); string refClassName=nameVector[iclass]; float maxP=-1; @@ -165,13 +186,19 @@ double getCost(const vector<Vector2d<float> > &trainingFeatures) maxClass=ic; } } + //test + cout << "maxClass:" << maxClass << "(" << nameVector.size() << ")" << endl; string className=nameVector[maxClass]; + //test + cout << "className:" << nameVector[maxClass] << endl; if(classValueMap.size()) cm.incrementResult(type2string<short>(classValueMap[refClassName]),type2string<short>(classValueMap[className]),1.0); else cm.incrementResult(cm.getClass(referenceVector[isample]),cm.getClass(outputVector[isample]),1.0); } } + //test + cout << "debug12" << endl; } assert(cm.nReference()); return(cm.kappa()); @@ -241,7 +268,9 @@ int main(int argc, char *argv[]) selMap["sbs"]=SBS; selMap["bfs"]=BFS; - assert(training_opt[0].size()); + assert(training_opt.size()); + if(input_opt.size()) + cv_opt[0]=0; if(verbose_opt[0]>=1) std::cout << "training shape file: " << training_opt[0] << std::endl; @@ -348,6 +377,7 @@ int main(int argc, char *argv[]) std::cout << mapit->first << ": " << (mapit->second).size() << " samples" << std::endl; ++mapit; } + nclass=trainingPixels.size(); 
if(classname_opt.size()) assert(nclass==classname_opt.size()); nband=trainingPixels[0][0].size()-2;//X and Y//trainingPixels[0][0].size(); @@ -511,7 +541,7 @@ int main(int argc, char *argv[]) cost=getCost(trainingFeatures); } else{ - while(cost-previousCost>epsilon_cost_opt[0]){ + while(fabs(cost-previousCost)>epsilon_cost_opt[0]){ previousCost=cost; switch(selMap[selector_opt[0]]){ case(SFFS): diff --git a/src/apps/pkfs_svm.cc b/src/apps/pkfs_svm.cc index b8b69de..1b12dd0 100644 --- a/src/apps/pkfs_svm.cc +++ b/src/apps/pkfs_svm.cc @@ -563,7 +563,7 @@ int main(int argc, char *argv[]) cost=getCost(trainingFeatures); } else{ - while(cost-previousCost>epsilon_cost_opt[0]){ + while(fabs(cost-previousCost)>epsilon_cost_opt[0]){ previousCost=cost; switch(selMap[selector_opt[0]]){ case(SFFS): diff --git a/src/apps/pkregression_nn.cc b/src/apps/pkregression_nn.cc index 4f3192c..88155e0 100644 --- a/src/apps/pkregression_nn.cc +++ b/src/apps/pkregression_nn.cc @@ -191,12 +191,25 @@ int main(int argc, char *argv[]) } switch(num_layers){ - case(3): - net.create_sparse(connection_opt[0],num_layers, ninput, nneuron_opt[0], noutput); + case(3):{ + unsigned int layers[3]; + layers[0]=ninput; + layers[1]=nneuron_opt[0]; + layers[2]=noutput; + net.create_sparse_array(connection_opt[0],num_layers,layers); + // net.create_sparse(connection_opt[0],num_layers, ninput, nneuron_opt[0], noutput); break; - case(4): - net.create_sparse(connection_opt[0],num_layers, ninput, nneuron_opt[0], nneuron_opt[1], noutput); + } + case(4):{ + unsigned int layers[3]; + layers[0]=ninput; + layers[1]=nneuron_opt[0]; + layers[2]=nneuron_opt[1]; + layers[3]=noutput; + net.create_sparse_array(connection_opt[0],num_layers,layers); + // net.create_sparse(connection_opt[0],num_layers, ninput, nneuron_opt[0], nneuron_opt[1], noutput); break; + } default: cerr << "Only 1 or 2 hidden layers are supported!" << endl; exit(1);
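A note on the change that recurs throughout this commit: pkclassify_nn.cc, pkfs_nn.cc and pkregression_nn.cc all replace the variadic create_sparse call with create_sparse_array, passing the layer sizes through an array (per the ChangeLog, due to an error in FANN). Below is a minimal sketch of the array-based call against libfann's stock C++ wrapper; the layer sizes and connection rate are placeholders, not values from this commit. Keep in mind that create_sparse_array reads num_layers entries from the array, so the array must be dimensioned to the full number of layers.

// Minimal sketch, assuming libfann and its stock C++ wrapper are installed.
// Layer sizes and connection rate are placeholders, not pktools defaults.
#include "floatfann.h"
#include "fann_cpp.h"

int main() {
  const unsigned int nFeatures = 10; // input neurons
  const unsigned int nNeurons = 5;   // neurons in the single hidden layer
  const unsigned int nClass = 3;     // output neurons (classes)

  // One entry per layer; create_sparse_array reads num_layers entries.
  unsigned int layers[3] = {nFeatures, nNeurons, nClass};

  FANN::neural_net net;
  // Array-based equivalent of create_sparse(rate, 3, nFeatures, nNeurons, nClass)
  net.create_sparse_array(0.7f, 3, layers);
  net.set_learning_rate(0.7f);
  return 0;
}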
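The pkclassify_svm.cc and pkdiff.cc hunks replace cm.oa_pct(&se95_oa) with cm.oa(&se95_oa) and multiply by 100 when printing, i.e. overall accuracy is now carried as a floating-point fraction and only converted to a percentage for output, in line with the commit message. The snippet below illustrates only that arithmetic; it is not the pktools ConfusionMatrix API.

// Overall accuracy as a floating-point fraction, printed as a percentage.
#include <iostream>
#include <vector>

double overallAccuracy(const std::vector<std::vector<unsigned long> > &cm) {
  unsigned long correct = 0, total = 0;
  for (size_t r = 0; r < cm.size(); ++r) {
    for (size_t c = 0; c < cm[r].size(); ++c) {
      total += cm[r][c];
      if (r == c)
        correct += cm[r][c]; // diagonal: reference class == mapped class
    }
  }
  return total ? static_cast<double>(correct) / total : 0.0; // fraction in [0,1]
}

int main() {
  std::vector<std::vector<unsigned long> > cm(2, std::vector<unsigned long>(2));
  cm[0][0] = 50; cm[0][1] = 3;  // rows: reference class
  cm[1][0] = 7;  cm[1][1] = 40; // columns: mapped class
  double doa = overallAccuracy(cm);
  std::cout << "Overall Accuracy: " << 100 * doa << std::endl; // prints 90
  return 0;
}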
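pkdiff.cc now takes its mask-value list through -t/--invalid (freeing -m as the short option for --mask): positive entries flag mask values as invalid, while a negative entry means only its absolute value is treated as valid. The function below restates that per-pixel test on its own; the variable names follow the diff, the surrounding program is omitted.

// Standalone restatement of the mask test from the pkdiff.cc hunk above.
// A positive entry flags pixels whose mask equals that value; a negative
// entry -v flags every mask value other than v.
#include <iostream>
#include <vector>

bool isMaskFlagged(short maskValue, const std::vector<int> &invalid) {
  bool maskFlagged = false;
  for (size_t i = 0; i < invalid.size(); ++i) {
    if (invalid[i] >= 0) { // listed values are invalid
      if (maskValue == invalid[i]) {
        maskFlagged = true;
        break;
      }
    } else { // only -invalid[i] is valid
      if (maskValue != -invalid[i])
        maskFlagged = true;
      else {
        maskFlagged = false;
        break;
      }
    }
  }
  return maskFlagged;
}

int main() {
  std::vector<int> invalid(1, 0);                       // like "-t 0"
  std::cout << isMaskFlagged(0, invalid) << std::endl;  // 1: mask value 0 is invalid
  invalid[0] = -255;                                    // like "-t -255"
  std::cout << isMaskFlagged(10, invalid) << std::endl; // 1: only 255 is valid
  return 0;
}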
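Finally, the feature-selection drivers in pkfs_nn.cc and pkfs_svm.cc now compare fabs(cost-previousCost) against the epsilon threshold, so the search keeps iterating as long as the cost still changes noticeably in either direction instead of stopping at the first decrease. A toy loop showing the effect; getCost() here is just a stand-in for the real cost function (the confusion-matrix kappa).

// Toy illustration of the convergence test changed in pkfs_nn.cc/pkfs_svm.cc.
// getCost() returns a cost sequence that dips once before it settles; with
// the old signed test the loop would have stopped at the dip.
#include <cmath>
#include <iostream>

double getCost(int iteration) {
  const double costs[] = {0.50, 0.70, 0.62, 0.80, 0.81, 0.811};
  return costs[iteration < 5 ? iteration : 5];
}

int main() {
  const double epsilon = 0.001; // plays the role of epsilon_cost_opt[0]
  int it = 0;
  double previousCost = 0;
  double cost = getCost(it);
  while (std::fabs(cost - previousCost) > epsilon) { // old test: cost-previousCost>epsilon
    previousCost = cost;
    cost = getCost(++it);
  }
  std::cout << "converged after " << it << " iterations, cost " << cost << std::endl;
  return 0;
}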