import org.apache.commons.lang.StringUtils;
import org.apache.spark.SparkConf;
import org.apache.spark.SparkContext;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.PairFlatMapFunction;
import scala.Tuple2;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
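
/**
 * Builds a simple inverted index with the Spark RDD API: for each word it lists every
 * input file that contains it, together with the word's occurrence count in that file.
 *
 * For example, with two hypothetical input files a.txt ("hello spark") and
 * b.txt ("hello world hello"), the program prints lines of the form below
 * (the order of postings inside a group is not guaranteed):
 *
 *   "hello", {(a.txt,1),(b.txt,2)}
 *   "spark", {(a.txt,1)}
 *   "world", {(b.txt,1)}
 */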
public class InvertedIndex {

    public static void main(String[] args) {
        // Run Spark locally with all available cores.
        SparkConf sparkConf = new SparkConf();
        sparkConf.set("spark.master", "local[*]");
        sparkConf.set("spark.app.name", "localrun");
        SparkContext sparkContext = SparkContext.getOrCreate(sparkConf);
        JavaSparkContext javaSparkContext = new JavaSparkContext(sparkContext);

        // Load every file under the input directory as a (filePath, fileContent) pair.
        JavaPairRDD<String, String> fileNameContentsRDD = javaSparkContext.wholeTextFiles(
                "/Users/lei.zhu/IdeaProjects/labs/geek-university-bigdata-training-camp/assignment_0815/src/main/resources/input", 1);

        // Emit one (word, fileName) pair for every word occurrence in every file.
        JavaPairRDD<String, String> wordFileNameRDD = fileNameContentsRDD.flatMapToPair(
                (PairFlatMapFunction<Tuple2<String, String>, String, String>) fileNameContentPair -> {
                    String fileName = getFileName(fileNameContentPair._1());
                    String content = fileNameContentPair._2();
                    String[] lines = content.split("\\r?\\n");
                    List<Tuple2<String, String>> wordFileNamePairs = new ArrayList<>(lines.length);
                    for (String line : lines) {
                        String[] wordsInCurrentLine = line.split(" ");
                        wordFileNamePairs.addAll(Arrays.stream(wordsInCurrentLine)
                                .filter(StringUtils::isNotBlank)
                                .map(word -> new Tuple2<>(word, fileName))
                                .collect(Collectors.toList()));
                    }
                    return wordFileNamePairs.iterator();
                });

        // Count occurrences per (word, fileName) key.
        JavaPairRDD<Tuple2<String, String>, Integer> wordFileNameCountPerPairs = wordFileNameRDD
                .mapToPair(wordFileNamePair -> new Tuple2<>(wordFileNamePair, 1))
                .reduceByKey(Integer::sum);

        // Re-key by word so that each value becomes a (fileName, count) posting.
        JavaPairRDD<String, Tuple2<String, Integer>> wordCountPerFileNamePairs = wordFileNameCountPerPairs
                .mapToPair(wordFileNameCountPerPair -> new Tuple2<>(
                        wordFileNameCountPerPair._1._1,
                        new Tuple2<>(wordFileNameCountPerPair._1._2, wordFileNameCountPerPair._2)));

        // Group all postings per word, join them into one string, and sort the index by word.
        JavaPairRDD<String, String> result = wordCountPerFileNamePairs.groupByKey()
                .mapToPair(wordPostings -> new Tuple2<>(
                        wordPostings._1,
                        StringUtils.join(wordPostings._2.iterator(), ',')))
                .sortByKey();

        // Print one line per word, e.g. "word", {(file1.txt,2),(file2.txt,1)}.
        for (Tuple2<String, String> pair : result.collect()) {
            System.out.printf("\"%s\", {%s}%n", pair._1, pair._2);
        }
    }

    // wholeTextFiles returns full file URIs; keep only the segment after the last '/'.
    private static String getFileName(String s) {
        return s.substring(s.lastIndexOf('/') + 1);
    }
}