/*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package sssp_hadoop;
import java.io.IOException;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
/**
*
* @author Sanghyun Lee
*/
/**
 * Driver for the first stage of a Hadoop-based single-source shortest path
 * (SSSP) pipeline: a MapReduce job that builds an adjacency list from the
 * raw input edges and writes it as a SequenceFile for later iterations.
 *
 * @author Sanghyun Lee
 */
public class SSSP_Hadoop {

    /**
     * Runs the "Make Adjacency List" job.
     *
     * @param args args[0] = input path, args[1] = output path
     */
    public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
        if (args.length < 2) {
            // Fail fast with a usage hint instead of an ArrayIndexOutOfBoundsException.
            System.err.println("Usage: SSSP_Hadoop <input path> <output path>");
            System.exit(2);
        }
        Configuration conf = new Configuration();
        Job job = getMakeADJListJob(conf, new Path(args[0]), new Path(args[1]));
        // Propagate job success/failure to the shell instead of discarding it.
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }

    /**
     * Configures (but does not submit) the adjacency-list building job:
     * text input, Text/Text output pairs, SequenceFile output so a
     * follow-up SSSP iteration can consume the result directly.
     *
     * @param conf   Hadoop configuration to build the job from
     * @param input  HDFS path of the raw edge input
     * @param output HDFS path for the adjacency-list output (deleted first if present)
     * @return the fully configured, unsubmitted job
     * @throws IOException if the filesystem cannot be reached
     */
    private static Job getMakeADJListJob(Configuration conf, Path input, Path output) throws IOException {
        // Job.getInstance replaces the deprecated new Job(conf) constructor
        // and sets the job name in the same call.
        Job job = Job.getInstance(conf, "Make Adjacency List");
        job.setJarByClass(SSSP_Hadoop.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        job.setMapperClass(ADJMapper.class);
        job.setReducerClass(ADJReducer.class);

        // Remove a stale output directory so the job does not abort on startup.
        FileSystem fs = FileSystem.get(conf);
        if (fs.exists(output)) {
            fs.delete(output, true);
        }

        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(SequenceFileOutputFormat.class);
        FileInputFormat.addInputPath(job, input);
        FileOutputFormat.setOutputPath(job, output);
        return job;
    }

    /** Mapper stub — adjacency-list parsing not yet implemented. */
    public static class ADJMapper extends Mapper<LongWritable, Text, Text, Text> {
        @Override
        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // TODO: parse one input edge line and emit (source, neighbor) pairs.
        }
    }

    /** Reducer stub — adjacency-list aggregation not yet implemented. */
    public static class ADJReducer extends Reducer<Text, Text, Text, Text> {
        @Override
        public void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            // TODO: concatenate the neighbors of `key` into one adjacency record.
        }
    }
}
Other posts in the '콤퓨타 > Hadoop' category:
- 리듀스의 출력이 Text, Text 일 때, 알파벳 역순으로 출력되게 하는 Comparator (0) | 2013.02.20
댓글