Source-code examples for the class weka.core.converters.ArffLoader.ArffReader

The following examples show how to use the weka.core.converters.ArffLoader.ArffReader API in practice; you can also follow the links to GitHub to view the full source code.
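Before the individual examples, here is a minimal, self-contained sketch of the usage pattern almost every snippet below shares: open a Reader on an .arff file, parse it with ArffReader, and mark the last attribute as the class. The file name is a placeholder.

import java.io.BufferedReader;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;

import weka.core.Instances;
import weka.core.converters.ArffLoader.ArffReader;

public class ArffReaderDemo {
	public static void main(String[] args) throws IOException {
		// "data.arff" is a placeholder; point it at any ARFF file
		try (BufferedReader reader = Files.newBufferedReader(Paths.get("data.arff"), StandardCharsets.UTF_8)) {
			ArffReader arffReader = new ArffReader(reader); // parses the whole file eagerly
			Instances data = arffReader.getData();
			data.setClassIndex(data.numAttributes() - 1); // treat the last attribute as the class
			System.out.println(data.numInstances() + " instances, " + data.numAttributes() + " attributes");
		}
	}
}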

Example 1
@Test
public void testImportanceEstimation() throws IOException, ExtractionOfImportantParametersFailedException {
	PerformanceKnowledgeBase pkb = new PerformanceKnowledgeBase();
	try (BufferedReader reader = Files.newBufferedReader(Paths.get(testFile), StandardCharsets.UTF_8)) {
		ArffReader arffReader = new ArffReader(reader);
		Instances data = arffReader.getData();
		data.setClassIndex(data.numAttributes() - 1);
		Component component = new Component("Component");
		ComponentInstance composition = new ComponentInstance(component, null, null);
		pkb.setPerformanceSamples(data, composition, "test");
		FANOVAParameterImportanceEstimator importanceEstimator = new FANOVAParameterImportanceEstimator("test", 2, 0.08);
		importanceEstimator.setPerformanceKnowledgeBase(pkb);
		Set<String> importantParams = importanceEstimator.extractImportantParameters(composition, false);
		LOGGER.info("important parameters: {}", importantParams);

	}
	// no assertion on the result; the test passes if the pipeline above runs without exceptions
	assertTrue(true);
}
 
Example 2   Project: AILibs   File: ExtendedM5ForestTest.java
@Before
public void testTrain() throws Exception {
	for (int dataset_index = 0; dataset_index < dataset_count; dataset_index++) {
		for (int noise_index = 0; noise_index < noise_count; noise_index++) {
			String dataset_name = getDatasetNameForIndex(dataset_index, noise_index);
			try (BufferedReader reader = Files.newBufferedReader(Paths.get(dataset_name), StandardCharsets.UTF_8)) {
				ArffReader arffReader = new ArffReader(reader);
				Instances data = arffReader.getData();
				for (int seed = 0; seed < seedNum; seed++) {
					data.setClassIndex(data.numAttributes() - 1);
					this.classifier[dataset_index][noise_index][seed] = new ExtendedM5Forest(seed);
					this.classifier[dataset_index][noise_index][seed].buildClassifier(data);
				}
			}
		}
	}
}
 
Example 3   Project: AILibs   File: ExtendedRandomForestTest.java
@Before
public void testTrain() throws Exception {
	for (int dataset_index = 0; dataset_index < dataset_count; dataset_index++) {
		for (int noise_index = 0; noise_index < noise_count; noise_index++) {
			String dataset_name = getDatasetNameForIndex(dataset_index, noise_index);
			try (BufferedReader reader = Files.newBufferedReader(Paths.get(dataset_name), StandardCharsets.UTF_8)) {
				ArffReader arffReader = new ArffReader(reader);
				Instances data = arffReader.getData();
				for (int seed = 0; seed < seedNum; seed++) {
					data.setClassIndex(data.numAttributes() - 1);
					this.classifier[dataset_index][noise_index][seed] = new ExtendedRandomForest(seed);
					this.classifier[dataset_index][noise_index][seed].buildClassifier(data);
				}
				System.out.println("Finished training. " + datasets[dataset_index] + ", " + noise[noise_index]);
			}
		}
	}
}
 
Example 4   Project: AILibs   File: ExtendedRandomTreeTest.java
/**
 * Tests the classifier without any cross-validation.
 *
 * @throws IOException if the test data cannot be read
 */
@Test
public void testPredict() throws IOException {
	try (BufferedReader reader = Files.newBufferedReader(Paths.get(testFile), StandardCharsets.UTF_8)) {
		ArffReader arffReader = new ArffReader(reader);
		Instances data = arffReader.getData();
		for (Instance instance : data) {
			// construct the real interval; the last two attributes hold its bounds
			double lower = instance.value(data.numAttributes() - 2);
			double upper = instance.value(data.numAttributes() - 1);
			Instance strippedInstance = new DenseInstance(data.numAttributes() - 2);
			for (int i = 0; i < data.numAttributes() - 2; i++) {
				strippedInstance.setValue(i, instance.value(i));
			}
			Interval actualInterval = new Interval(lower, upper);
			Interval predictedInterval = this.classifier.predictInterval(strippedInstance);
			System.out.println("Actual interval: " + actualInterval + ", predicted interval: " + predictedInterval);
		}

	}
}
 
Example 5   Project: tsml   File: CSVLoader.java
@Override
public Instances getDataSet() throws IOException {

  if (m_sourceReader == null) {
    throw new IOException("No source has been specified");
  }

  if (getRetrieval() == INCREMENTAL) {
    throw new IOException(
        "Cannot mix getting instances in both incremental and batch modes");
  }
  setRetrieval(BATCH);

  if (m_structure == null) {
    getStructure();
  }

  while (readData(true)) {
    // keep reading until the CSV source is exhausted; rows are written to the temporary ARFF file
  }

  m_dataDumper.flush();
  m_dataDumper.close();

  // make final structure
  makeStructure();

  Instances initialInsts;
  try (Reader sr = new BufferedReader(new FileReader(m_tempFile))) {
    ArffReader initialArff = new ArffReader(sr, m_structure, 0);
    initialInsts = initialArff.getData();
  }

  return initialInsts;
}
 
Example 6   Project: apogen   File: WekaClusterers.java
/**
 * Runs WEKA hierarchical clustering on the given ARFF file, searching for
 * numClusters clusters.
 * 
 * @param filename    path to the ARFF file to cluster
 * @param numClusters number of clusters to search for
 * @param linkage     linkage type to use (e.g. SINGLE, COMPLETE, AVERAGE)
 * @return a map from cluster id to the identifiers of the instances assigned to it
 * @throws Exception if loading the data or building the clusterer fails
 */
public static LinkedHashMap<Integer, LinkedList<String>> runHierarchical(String filename, String numClusters,
		String linkage) throws Exception {

	HierarchicalClusterer c = new HierarchicalClusterer();

	c.setNumClusters(Integer.parseInt(numClusters));
	c.setDebug(false);
	c.setPrintNewick(true);
	// apply the requested linkage type, e.g. "SINGLE", "COMPLETE" or "AVERAGE"
	c.setLinkType(new SelectedTag(linkage, HierarchicalClusterer.TAGS_LINK_TYPE));

	Instances data;
	try (BufferedReader reader = new BufferedReader(new FileReader(filename))) {
		ArffReader arff = new ArffReader(reader);
		data = arff.getData();
	}
	data.setClassIndex(0);

	c.buildClusterer(data);

	LinkedHashMap<Integer, LinkedList<String>> output = new LinkedHashMap<Integer, LinkedList<String>>();

	// initialize clusters map
	for (int i = 0; i < Integer.parseInt(numClusters); i++) {
		output.put(i, new LinkedList<String>());
	}

	for (Instance instance : data) {
		// System.out.println(instance.stringValue(0) + "\t" +
		// c.clusterInstance(instance));
		output.get(c.clusterInstance(instance)).add(instance.stringValue(0));
	}

	return output;
}
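A hypothetical call site for runHierarchical is sketched below; the file name, cluster count, and linkage name are placeholder arguments, and the ARFF file is assumed to carry a string identifier as its first attribute (the attribute the method groups by).

// Hypothetical usage; "pages.arff", "4" and "AVERAGE" are placeholder arguments.
LinkedHashMap<Integer, LinkedList<String>> clusters =
		WekaClusterers.runHierarchical("pages.arff", "4", "AVERAGE");
clusters.forEach((id, members) -> System.out.println("cluster " + id + ": " + members));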
 
Example 7   Project: AILibs   File: ExtendedRandomTreeTest.java
@Before
public void testTrain() throws Exception {
	try (BufferedReader reader = Files.newBufferedReader(Paths.get(trainFile), StandardCharsets.UTF_8)) {
		ArffReader arffReader = new ArffReader(reader);
		Instances data = arffReader.getData();
		data.setClassIndex(data.numAttributes() - 1);

		this.classifier = new ExtendedRandomTree();
		this.classifier.buildClassifier(data);

	}
}
 
Example 8   Project: AILibs   File: ExtendedM5ForestTest.java
/**
 * Tests the classifier without any cross-validation.
 *
 * @throws IOException if the test data cannot be read
 */
@Test
public void testPredict() throws IOException {
	for (int dataset_index = 0; dataset_index < dataset_count; dataset_index++) {
		for (int noise_index = 0; noise_index < noise_count; noise_index++) {
			for (int seed = 0; seed < seedNum; seed++) {
				String testfile_name = this.getTestFileName(dataset_index);
				try (BufferedReader reader = Files.newBufferedReader(Paths.get(testfile_name),
						StandardCharsets.UTF_8)) {
					ArffReader arffReader = new ArffReader(reader);
					Instances data = arffReader.getData();
					List<Double> predictedLowers = new ArrayList<>();
					List<Double> actualLowers = new ArrayList<>();
					List<Double> predictedUppers = new ArrayList<>();
					List<Double> actualUppers = new ArrayList<>();
					for (Instance instance : data) {
						// construct the real interval
						double lower = instance.value(data.numAttributes() - 2);
						double upper = instance.value(data.numAttributes() - 1);
						Instance strippedInstance = new DenseInstance(data.numAttributes() - 2);
						for (int i = 0; i < data.numAttributes() - 2; i++) {
							strippedInstance.setValue(i, instance.value(i));
						}
						Interval predictedInterval = this.classifier[dataset_index][noise_index][seed]
								.predictInterval(strippedInstance);
						// System.out.println(
						// "Actual interval: " + actualInterval + ", predicted Interval " +
						// predictedInterval);
						predictedLowers.add(predictedInterval.getInf());
						predictedUppers.add(predictedInterval.getSup());
						actualLowers.add(lower);
						actualUppers.add(upper);
					}

					double l1LossLower = L1Loss(predictedLowers, actualLowers);
					double l1LossUpper = L1Loss(predictedUppers, actualUppers);
					// System.out.println("L1 loss for the lower bound is " + l1LossLower);
					// System.out.println("L1 loss for the upper bound is " + l1LossUpper);

					l1Lower[dataset_index][noise_index][seed] = l1LossLower;
					l1Upper[dataset_index][noise_index][seed] = l1LossUpper;

				}
			}
			// drop the single worst seed before averaging the remaining losses
			double lowerMax = Arrays.stream(l1Lower[dataset_index][noise_index]).max().getAsDouble();
			double upperMax = Arrays.stream(l1Upper[dataset_index][noise_index]).max().getAsDouble();
			double avgLower = Arrays.stream(l1Lower[dataset_index][noise_index]).filter(d -> d != lowerMax)
					.average().getAsDouble();
			double avgUpper = Arrays.stream(l1Upper[dataset_index][noise_index]).filter(d -> d != upperMax)
					.average().getAsDouble();
			double l1Loss = avgLower + avgUpper;
			System.out.println(datasets[dataset_index] + " " + noise[noise_index] + " " + l1Loss);
		}
	}
}
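L1Loss is a helper defined elsewhere in the test class and not shown in this snippet. A minimal sketch consistent with how it is called above (the mean absolute difference between paired predicted and actual bounds) could look like this; the real AILibs implementation may differ.

// Hypothetical reconstruction of the L1Loss helper used above.
private static double L1Loss(final List<Double> predicted, final List<Double> actual) {
	double sum = 0;
	for (int i = 0; i < predicted.size(); i++) {
		sum += Math.abs(predicted.get(i) - actual.get(i));
	}
	return sum / predicted.size();
}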
 
Example 9   Project: AILibs   File: ExtendedRandomForestTest.java
/**
 * Tests the classifier without any cross-validation.
 *
 * @throws IOException if the test data cannot be read
 */
@Test
public void testPredict() throws IOException {
	for (int dataset_index = 0; dataset_index < dataset_count; dataset_index++) {
		for (int noise_index = 0; noise_index < noise_count; noise_index++) {
			for (int seed = 0; seed < seedNum; seed++) {
				String testfile_name = this.getTestFileName(dataset_index);
				try (BufferedReader reader = Files.newBufferedReader(Paths.get(testfile_name),
						StandardCharsets.UTF_8)) {
					ArffReader arffReader = new ArffReader(reader);
					Instances data = arffReader.getData();
					List<Double> predictedLowers = new ArrayList<>();
					List<Double> actualLowers = new ArrayList<>();
					List<Double> predictedUppers = new ArrayList<>();
					List<Double> actualUppers = new ArrayList<>();
					for (Instance instance : data) {
						// construct the real interval
						double lower = instance.value(data.numAttributes() - 2);
						double upper = instance.value(data.numAttributes() - 1);
						Instance strippedInstance = new DenseInstance(data.numAttributes() - 2);
						for (int i = 0; i < data.numAttributes() - 2; i++) {
							strippedInstance.setValue(i, instance.value(i));
						}
						Interval actualInterval = new Interval(lower, upper); // not used below; the raw bounds are collected instead
						Interval predictedInterval = this.classifier[dataset_index][noise_index][seed]
								.predictInterval(strippedInstance);

						predictedLowers.add(predictedInterval.getInf());
						predictedUppers.add(predictedInterval.getSup());
						actualLowers.add(lower);
						actualUppers.add(upper);
					}

					double l1LossLower = L1Loss(predictedLowers, actualLowers);
					double l1LossUpper = L1Loss(predictedUppers, actualUppers);

					l1Lower[dataset_index][noise_index][seed] = l1LossLower;
					l1Upper[dataset_index][noise_index][seed] = l1LossUpper;

				}
			}
			double avgLower = Arrays.stream(l1Lower[dataset_index][noise_index]).average().getAsDouble();
			double avgUpper = Arrays.stream(l1Upper[dataset_index][noise_index]).average().getAsDouble();
			double l1Loss = (avgLower + avgUpper) / 2;
			System.out.println(datasets[dataset_index] + " " + noise[noise_index] + " " + l1Loss);
		}
	}
}
 
Example 10   Project: NLIWOD   File: CDTClassifierEvaluation.java
public static void main(String[] args) throws Exception {		
	/*
	 * For multilabel classification:
	 */
	
	//load the data
	Path datapath= Paths.get("./src/main/resources/old/Qald6Logs.arff");
	BufferedReader reader = new BufferedReader(new FileReader(datapath.toString()));
	ArffReader arff = new ArffReader(reader);
	Instances data = arff.getData();
	data.setClassIndex(6);
	
	// randomize data
	long seed = System.currentTimeMillis();
	int folds = 100;

	String qasystem = "KWGAnswer";

	Random rand = new Random(seed);
	Instances randData = new Instances(data);
	randData.randomize(rand);
	ArrayList<String> systems = Lists.newArrayList("KWGAnswer", "NbFramework", "PersianQA", "SemGraphQA", "UIQA_withoutManualEntries", "UTQA_English");
	
	
	// perform cross-validation
	double foldavep = 0;
	double foldaver = 0;
	double foldavef = 0;
	double foldsys = 0;

	for (int n = 0; n < folds; n++) {
		Instances train = randData.trainCV(folds, n);
		Instances test = randData.testCV(folds, n);
		// build and evaluate classifier
		PSt pst = new PSt();
		pst.buildClassifier(train);
		float ave_p = 0;
		float ave_r = 0;
		float sysp = 0;
		float sysr = 0;

		for (int j = 0; j < test.size(); j++) {
			Instance ins = test.get(j);
			double[] confidences = pst.distributionForInstance(ins);
			int argmax = -1;
			double max = -1;
			for (int i = 0; i < 6; i++) {
				if (confidences[i] > max) {
					max = confidences[i];
					argmax = i;
				}
			}
			String sys2ask = systems.get(systems.size() - argmax - 1);
			ave_p += Float.parseFloat(loadSystemP(sys2ask).get(j));
			ave_r += Float.parseFloat(loadSystemR(sys2ask).get(j));
			sysp += Float.parseFloat(loadSystemP(qasystem).get(j)); // the fixed baseline's own precision
			sysr += Float.parseFloat(loadSystemR(qasystem).get(j)); // and its own recall
		}
		double p = ave_p/test.size();
		double r = ave_r/test.size();
		double syspave = sysp/test.size();
		double sysrave = sysr/test.size();
		double sysfmeasure = 2*sysrave*syspave/(sysrave + syspave);
		System.out.println(" RESULT FOR FOLD " + n);
		System.out.println("macro P : " + p);
		System.out.println("macro R : " + r);
		double fmeasure = 2*p*r/(p + r);
		System.out.println("macro F : " + fmeasure + '\n');
		foldavep += p/folds;
		foldaver += r/folds;
		foldavef += fmeasure/folds;
		foldsys += sysfmeasure/folds;
	}
	System.out.println(" RESULT FOR CV ");
	System.out.println("macro aveP : " + foldavep);
	System.out.println("macro aveR : " + foldaver);
	System.out.println("macro aveF : " + foldavef);
	System.out.println("macro aveF " + qasystem + " : " + foldsys);


}
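loadSystemP and loadSystemR are project helpers that this snippet does not include; they return one precision (or recall) string per benchmark question. A hypothetical sketch under that assumption, with an invented file layout, might be:

// Hypothetical helpers; the real NLIWOD code may use a different file layout.
static List<String> loadSystemP(String system) throws IOException {
	// assumed: one precision value per line, line j belonging to question j
	return Files.readAllLines(Paths.get("./src/main/resources/old/" + system + ".precision"));
}

static List<String> loadSystemR(String system) throws IOException {
	// assumed: one recall value per line, line j belonging to question j
	return Files.readAllLines(Paths.get("./src/main/resources/old/" + system + ".recall"));
}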
 
Example 11   Project: NLIWOD   File: LeaveOneOutCV.java
public static void main(String[] args) throws Exception {		
		/*
		 * For multilabel classification:
		 */
		//load the data
		Path datapath= Paths.get("./src/main/resources/old/Qald6Logs.arff");
		BufferedReader reader = new BufferedReader(new FileReader(datapath.toString()));
		ArffReader arff = new ArffReader(reader);
		/*
		 * Test the trained system
		 */
		
//		JSONObject qald6test = loadTestQuestions();
//		JSONArray questions = (JSONArray) qald6test.get("questions");
//		ArrayList<String> testQuestions = Lists.newArrayList();
//		for(int i = 0; i < questions.size(); i++){
//			JSONObject questionData = (JSONObject) questions.get(i);
//			JSONArray questionStrings = (JSONArray) questionData.get("question");
//			JSONObject questionEnglish = (JSONObject) questionStrings.get(0);
//			testQuestions.add((String) questionEnglish.get("string"));
//		}

		Instances data = arff.getData();
		data.setClassIndex(6);
		System.out.println();
		double cv_ave = 0;
		ArrayList<String> systems = Lists.newArrayList("KWGAnswer", "NbFramework", "PersianQA", "SemGraphQA", "UIQA_withoutManualEntries", "UTQA_English" );
		for(int i = 0; i < 100; i++){
			Instance testquestion = data.get(i);
			data.remove(i);
			PSt classifier = new PSt();
			classifier.buildClassifier(data);
			double[] confidences = classifier.distributionForInstance(testquestion);

			int argmax = -1;
			double max = -1;
			for(int j = 0; j < 6; j++){
				if(confidences[j]>max){
					max = confidences[j];
					argmax = j;
				}
			}
			String sys2ask = systems.get(systems.size() - argmax -1);
			float p = Float.parseFloat(loadSystemP(sys2ask).get(i));				
			float r = Float.parseFloat(loadSystemR(sys2ask).get(i));
			double f = 0;
			if(p>0&&r>0){f = 2*p*r/(p + r);}
			cv_ave += f;
			data.add(i, testquestion);
		}
		System.out.println(cv_ave/100);
	}
 
Example 12   Project: NLIWOD   File: TableMaker.java
public static void main(String[] args) throws Exception {				 
	Path datapath= Paths.get("./src/main/resources/old/Qald6Logs.arff");
	BufferedReader reader = new BufferedReader(new FileReader(datapath.toString()));
	ArffReader arff = new ArffReader(reader);
	Instances data = arff.getData();
	data.setClassIndex(6);
	
	// change to the classifier of your choice
	PSt classifier = new PSt();
	classifier.buildClassifier(data);

	
	JSONObject qald6test = Utils.loadTestQuestions();
	JSONArray questions = (JSONArray) qald6test.get("questions");
	ArrayList<String> testQuestions = Lists.newArrayList();
	for (int i = 0; i < questions.size(); i++) {
		JSONObject questionData = (JSONObject) questions.get(i);
		JSONArray questionStrings = (JSONArray) questionData.get("question");
		JSONObject questionEnglish = (JSONObject) questionStrings.get(0);
		testQuestions.add((String) questionEnglish.get("string"));
	}
	ArrayList<String> systems = Lists.newArrayList("KWGAnswer", "NbFramework", "PersianQA", "SemGraphQA", "UIQA_withoutManualEntries", "UTQA_English" );
	double avef = 0;
	double[] systemavef = {0, 0, 0, 0, 0, 0, 0}; // one slot per system, plus a last slot for the per-question best
	for(int i=0; i<data.size(); i++){
		String tmp = "";
		tmp += i +"\t &" + testQuestions.get(i);
		double bestf = 0;
		for(String system: systems){
			double p = Float.parseFloat(Utils.loadSystemP(system).get(i));				
			double r = Float.parseFloat(Utils.loadSystemR(system).get(i));
			double f = 0;
			if(!(p==0&&r==0)){
				f = 2*p*r/(p+r);
			}
			if(f > bestf){
				bestf = f;
			}
			tmp += "\t &" + Math.floor(f * 100) / 100;
			systemavef[systems.indexOf(system)] += f/data.size();
		}
		systemavef[6] += bestf/data.size();
		tmp += "\t &" + Math.floor(bestf * 100) / 100;
		double[] confidences = classifier.distributionForInstance(data.get(i));
		System.out.println(Arrays.toString(confidences));
		int argmax = -1;
		double max = -1;
		for (int j = 0; j < 6; j++) {
			if (confidences[j] > max) {
				max = confidences[j];
				argmax = j;
			}
		}

		String sys2ask = systems.get(systems.size() - argmax - 1);
		double systemp = Float.parseFloat(Utils.loadSystemP(sys2ask).get(i));				
		double systemr = Float.parseFloat(Utils.loadSystemR(sys2ask).get(i));
		double systemf = 0;
		if(!(systemp==0&&systemr==0)){
			systemf = 2*systemp*systemr/(systemp+systemr);
		}
		avef += systemf;
		tmp += "\t &" + Math.floor(systemf * 100) / 100;

		tmp += "\\\\";
		System.out.println(tmp);
	}
	System.out.println(Arrays.toString(systemavef));
	System.out.println(avef/data.size());
}
 
Example 13   Project: NLIWOD   File: CrossValidationExperiments.java
public static void main(String[] args) throws Exception {		

		Path datapath= Paths.get("./src/main/resources/old/Qald6Logs.arff");
		BufferedReader reader = new BufferedReader(new FileReader(datapath.toString()));
		ArffReader arff = new ArffReader(reader);
		Instances data = arff.getData();
		data.setClassIndex(6);
		
		ArrayList<String> systems = Lists.newArrayList("KWGAnswer", "NbFramework", "PersianQA", "SemGraphQA", "UIQA_withoutManualEntries", "UTQA_English" );


		int seed = 133;
		// Change to 100 for leave-one-out CV
		int folds = 10;
		
		Random rand = new Random(seed);
		Instances randData = new Instances(data);
		randData.randomize(rand);

		float cv_ave_f = 0;
		
		for(int n=0; n < folds; n++){
		    Instances train = randData.trainCV(folds,  n);
		    Instances test = randData.testCV(folds,  n);
		    
			// change to the classifier of your choice
			CDN classifier = new CDN();
			classifier.buildClassifier(train);
			

			float ave_p = 0;
			float ave_r = 0;
	
			for(int j = 0; j < test.size(); j++){
				Instance ins = test.get(j);
				int k = 0;
				// recover the index of this test instance in the full dataset
				// (needed to look up the per-question P/R logs)
				for (int l = 0; l < data.size(); l++) {
					Instance tmp = data.get(l);
					if (tmp.toString().equals(ins.toString())) {
						k = l;
					}
				}
				double[] confidences = classifier.distributionForInstance(ins);
				int argmax = -1;
				double max = -1;
				for (int i = 0; i < 6; i++) {
					if (confidences[i] > max) {
						max = confidences[i];
						argmax = i;
					}
				}
				String sys2ask = systems.get(systems.size() - argmax -1);
				ave_p += Float.parseFloat(Utils.loadSystemP(sys2ask).get(k));				
				ave_r += Float.parseFloat(Utils.loadSystemR(sys2ask).get(k));
			}
			
			double p = ave_p/test.size();
			double r = ave_r/test.size();
			double fmeasure = 0;
			if(p>0&&r>0){fmeasure = 2*p*r/(p + r);}
			System.out.println("macro F on fold " + n + ": " + fmeasure);
			
			cv_ave_f += fmeasure/folds;
						
		}
		System.out.println("macro F average: " + cv_ave_f);
		System.out.println('\n');
	}
 
Example 14   Project: NLIWOD   File: CDTClassifierMultilable.java
public static void main(String[] args) throws Exception {		
/*
 * For multilabel classification:
 */

//The classifier
RAkELd PSt_Classifier = new RAkELd();
//load the data
Path datapath= Paths.get("./src/main/resources/old/Qald6Logs.arff");
BufferedReader reader = new BufferedReader(new FileReader(datapath.toString()));
ArffReader arff = new ArffReader(reader);
Instances data = arff.getData();
data.setClassIndex(6);
PSt_Classifier.buildClassifier(data);

/*
 * Test the trained system
 */

JSONObject qald6test = loadTestQuestions();
JSONArray questions = (JSONArray) qald6test.get("questions");
ArrayList<String> testQuestions = Lists.newArrayList();
for(int i = 0; i < questions.size(); i++){
	JSONObject questionData = (JSONObject) questions.get(i);
	JSONArray questionStrings = (JSONArray) questionData.get("question");
	JSONObject questionEnglish = (JSONObject) questionStrings.get(0);
	testQuestions.add((String) questionEnglish.get("string"));
}


ArrayList<String> systems = Lists.newArrayList("KWGAnswer", "NbFramework", "PersianQA", "SemGraphQA", "UIQA_withoutManualEntries", "UTQA_English" );
double ave_f = 0;
double ave_bestp = 0;
double ave_bestr = 0;

for(int j = 0; j < data.size(); j++){
	Instance ins = data.get(j);
	double[] confidences = PSt_Classifier.distributionForInstance(ins);
	int argmax = -1;
	double max = -1;
	for (int i = 0; i < 6; i++) {
		if (confidences[i] > max) {
			max = confidences[i];
			argmax = i;
		}
	}

	// compare the trained system with the best possible system choice
	String sys2ask = systems.get(systems.size() - argmax - 1);
	float p = Float.parseFloat(loadSystemP(sys2ask).get(j));
	float r = Float.parseFloat(loadSystemR(sys2ask).get(j));

	double bestp = 0;
	double bestr = 0;
	String bestSystemp = "";
	String bestSystemr = "";
	for (String system : systems) {
		if (Double.parseDouble(loadSystemP(system).get(j)) > bestp) { bestSystemp = system; bestp = Double.parseDouble(loadSystemP(system).get(j)); }
		if (Double.parseDouble(loadSystemR(system).get(j)) > bestr) { bestSystemr = system; bestr = Double.parseDouble(loadSystemR(system).get(j)); }
	}
	ave_bestp += bestp;
	ave_bestr += bestr;
	System.out.println(testQuestions.get(j));
	System.out.println(j + "... asked " + sys2ask + " with p " + loadSystemP(sys2ask).get(j) + "... best possible p: " + bestp + " was achieved by " + bestSystemp);
	System.out.println(j + "... asked " + sys2ask + " with r " + loadSystemR(sys2ask).get(j) + "... best possible r: " + bestr + " was achieved by " + bestSystemr);
	if (p > 0 && r > 0) {
		ave_f += 2 * p * r / (p + r);
	}
}
System.out.println("macro F : " + ave_f/data.size());
}
 
Example 15   Project: tsml   File: Instances.java
/**
 * Reads an ARFF file from a reader, and assigns a weight of
 * one to each instance. Lets the index of the class 
 * attribute be undefined (negative).
 *
 * @param reader the reader
 * @throws IOException if the ARFF file is not read 
 * successfully
 */
public Instances(/*@non_null@*/Reader reader) throws IOException {
  ArffReader arff = new ArffReader(reader);
  Instances dataset = arff.getData();
  initialize(dataset, dataset.numInstances());
  dataset.copyInstances(0, this, dataset.numInstances());
  compactify();
}
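A short usage sketch for this constructor; the file name is a placeholder, and the class index is set explicitly because the constructor leaves it undefined.

try (BufferedReader r = new BufferedReader(new FileReader("iris.arff"))) { // placeholder path
	Instances data = new Instances(r);
	data.setClassIndex(data.numAttributes() - 1); // the constructor leaves the class index at -1
	System.out.println(data.numInstances() + " instances read");
}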
 
Example 16   Project: tsml   File: Instances.java
/**
 * Reads the header of an ARFF file from a reader and 
 * reserves space for the given number of instances. Lets
 * the class index be undefined (negative).
 *
 * @param reader the reader
 * @param capacity the capacity
 * @throws IllegalArgumentException if the header is not read successfully
 * or the capacity is negative.
 * @throws IOException if there is a problem with the reader.
 * @deprecated instead of using this method in conjunction with the
 * <code>readInstance(Reader)</code> method, one should use the 
 * <code>ArffLoader</code> or <code>DataSource</code> class instead.
 * @see weka.core.converters.ArffLoader
 * @see weka.core.converters.ConverterUtils.DataSource
 */
//@ requires capacity >= 0;
//@ ensures classIndex() == -1;
@Deprecated public Instances(/*@non_null@*/Reader reader, int capacity)
  throws IOException {

  ArffReader arff = new ArffReader(reader, 0);
  Instances header = arff.getStructure();
  initialize(header, capacity);
  m_Lines = arff.getLineNo();
}
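As the deprecation note says, new code should go through ArffLoader or ConverterUtils.DataSource instead. A minimal sketch of the DataSource route, with a placeholder file name:

import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

// DataSource.read handles ARFF (and other registered formats) directly.
Instances data = DataSource.read("data.arff"); // placeholder path
if (data.classIndex() == -1) {
	data.setClassIndex(data.numAttributes() - 1);
}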
 
Example 17   Project: tsml   File: Instances.java
/**
 * Reads a single instance from the reader and appends it
 * to the dataset.  Automatically expands the dataset if it
 * is not large enough to hold the instance. This method does
 * not check for carriage return at the end of the line.
 *
 * @param reader the reader 
 * @return false if end of file has been reached
 * @throws IOException if the information is not read 
 * successfully
 * @deprecated instead of using this method in conjunction with the
 * <code>readInstance(Reader)</code> method, one should use the 
 * <code>ArffLoader</code> or <code>DataSource</code> class instead.
 * @see weka.core.converters.ArffLoader
 * @see weka.core.converters.ConverterUtils.DataSource
 */ 
@Deprecated public boolean readInstance(Reader reader) throws IOException {

  ArffReader arff = new ArffReader(reader, this, m_Lines, 1);
  Instance inst = arff.readInstance(arff.getData(), false);
  m_Lines = arff.getLineNo();
  if (inst != null) {
    add(inst);
    return true;
  }
  else {
    return false;
  }
}
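For the incremental reading that this deprecated method used to provide, ArffLoader can stream instances one at a time. A minimal sketch, assuming a local ARFF file:

import java.io.File;

import weka.core.Instance;
import weka.core.Instances;
import weka.core.converters.ArffLoader;

ArffLoader loader = new ArffLoader();
loader.setFile(new File("big.arff")); // placeholder path
Instances structure = loader.getStructure(); // header only, no data rows yet
Instance inst;
while ((inst = loader.getNextInstance(structure)) != null) {
	// process each instance as it is read
}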