Skip to content

Commit 7b9f68b

Browse files
Minor refactoring
1 parent f98c776 commit 7b9f68b

File tree

3 files changed

+0
-37
lines changed

3 files changed

+0
-37
lines changed

src/main/java/exercise_2/Exercise_2.java

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -141,5 +141,4 @@ public static void shortestPaths(JavaSparkContext ctx) {
141141
Utils.print("Minimum cost to get from '" + srcLabel + "' to '" + descLabel + "' is " + cost);
142142
});
143143
}
144-
145144
}

src/main/java/exercise_3/Exercise_3.java

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,6 @@
1313
import scala.runtime.AbstractFunction1;
1414
import scala.runtime.AbstractFunction2;
1515
import scala.runtime.AbstractFunction3;
16-
import shapeless.Tuple;
1716

1817
import java.io.Serializable;
1918
import java.util.ArrayList;
@@ -27,7 +26,6 @@
2726
public class Exercise_3 {
2827

2928
// Initial value for pregel execution
30-
// static final Tuple2<Integer, List<String>> INITIAL_VALUE = new Tuple2<Integer, List<String>>(Integer.MAX_VALUE, new ArrayList<String>());
3129
static final Vertex INITIAL_VALUE = new Vertex(Integer.MAX_VALUE);
3230

3331
// Nodes' Labels

src/main/java/exercise_4/Exercise_4.java

Lines changed: 0 additions & 34 deletions
Original file line numberDiff line numberDiff line change
@@ -3,8 +3,6 @@
33
import org.apache.spark.api.java.JavaRDD;
44
import org.apache.spark.api.java.JavaSparkContext;
55
import org.apache.spark.sql.Dataset;
6-
import org.apache.spark.sql.Encoder;
7-
import org.apache.spark.sql.Encoders;
86
import org.apache.spark.sql.Row;
97
import org.apache.spark.sql.RowFactory;
108
import org.apache.spark.sql.SQLContext;
@@ -15,12 +13,7 @@
1513
import org.apache.spark.rdd.RDD;
1614
import org.graphframes.GraphFrame;
1715
import java.util.stream.IntStream;
18-
import java.io.IOException;
19-
import java.nio.file.Files;
20-
import java.nio.file.Path;
21-
import java.nio.file.Paths;
2216
import java.util.ArrayList;
23-
import java.util.Arrays;
2417
import java.util.List;
2518
import utils.Utils;
2619

@@ -51,14 +44,7 @@ public static void wikipedia(JavaSparkContext ctx, SQLContext sqlCtx) {
5144

5245
Utils.line_separator();
5346

54-
// graphFrame.edges().show();
55-
// graphFrame.vertices().show();
56-
57-
// @todo: Benchmark with multiple dumping factor and numIterations
58-
// --- Ideas ---
59-
// 1. Define some dumping factors and numIterations and run & time pagerank algo. and select the best
6047
// For dumping factor
61-
// List<String> time = new ArrayList<String>(Arrays.asList(new String[120]));
6248
List<Row> timeList = new ArrayList<Row>();
6349
StructType outputSchema = new StructType(new StructField[] {
6450
new StructField("dumping_factor", DataTypes.DoubleType, false, new MetadataBuilder().build()),
@@ -84,7 +70,6 @@ public static void wikipedia(JavaSparkContext ctx, SQLContext sqlCtx) {
8470
Utils.print(log);
8571
topVertices.show(10);
8672
Utils.line_separator();
87-
// time.add(log);
8873
});
8974
});
9075

@@ -93,24 +78,5 @@ public static void wikipedia(JavaSparkContext ctx, SQLContext sqlCtx) {
9378
Long count = output.count();
9479
output.show(count.intValue());
9580
Utils.line_separator();
96-
97-
// try {
98-
// Files.write(Paths.get("/root/SDM-Lab-2/src/main/java/exercise_4/output.txt"), time);
99-
// } catch (IOException e) {
100-
// Utils.print("Unable to save file" + e);
101-
// }
102-
103-
// GraphFrame gf = graphFrame.pageRank().tol(0.01).resetProbability(0.15).run();
104-
// GraphFrame gf = graphFrame.pageRank().resetProbability(0.15).maxIter(10).run();
105-
106-
// Utils.line_separator();
107-
108-
// gf.edges().show();
109-
// gf.vertices().show();
110-
111-
// Utils.line_separator();
112-
// Dataset<Row> topVertices = gf.vertices().sort(org.apache.spark.sql.functions.desc("pagerank"));
113-
// topVertices.show(10);
114-
11581
}
11682
}

0 commit comments

Comments
 (0)