Java: can we put some computation tasks in the setup() method of the Mapper class in MapReduce code?

I have used the setup() method in my mapper class. There is also a user-defined method, aprioriGenK(), which is defined in the mapper class and called from the map() method.

Now the problem: as far as I know, the map method is called once for every input line. If there are 100 lines, the method is called 100 times, and each call invokes aprioriGenK in turn. But aprioriGenK does not need to be called inside map every time: its result is the same for all input lines seen by the map method. aprioriGenK is very CPU-intensive, so calling it repeatedly increases the computation time. Can we arrange to call aprioriGenK only once and reuse its result in every call to map? I tried keeping aprioriGenK in the setup method so that it is called only once, but surprisingly that slowed execution down considerably.
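
For reference, the general pattern in question, doing the expensive work once per mapper task in setup() and only reusing its result in map(), looks roughly like the following minimal sketch (ExpensiveOnceMapper and buildCandidates() are hypothetical stand-ins, not the real code):

import java.io.IOException;
import java.util.HashSet;
import java.util.Set;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class ExpensiveOnceMapper extends Mapper<LongWritable, Text, Text, IntWritable>
{
    private final static IntWritable one = new IntWritable(1);
    private Set<String> candidates;                 // holds the result of the one-time computation

    @Override
    protected void setup(Context context) throws IOException, InterruptedException
    {
        candidates = buildCandidates();             // runs once per mapper task, before any map() call
    }

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException
    {
        for (String token : value.toString().split("\\s+"))
            if (candidates.contains(token))         // reuse the cached result for every input record
                context.write(new Text(token), one);
    }

    private Set<String> buildCandidates()           // stand-in for the expensive computation
    {
        Set<String> set = new HashSet<String>();
        set.add("example-item");
        return set;
    }
}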

Here is my code:

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.net.URI;
import java.util.LinkedList;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.util.StringUtils;

import dataStructuresV2.ItemsetTrie;

public class AprioriTrieMapper extends Mapper<Object, Text, Text, IntWritable>
{
    public static enum State
    {
        UPDATED
    }

    private final static IntWritable one = new IntWritable(1);
    private Text itemset = new Text();

    private Configuration conf;
    private StringTokenizer fitemset;   // store one line of previous output file of frequent itemsets
    private ItemsetTrie trieLk_1 = null;    // prefix tree to store candidate (k-1)-itemsets of previous pass
    private int k;                      // itemsetSize or iteration no.
//  private ItemsetTrie trieCk = null;          // prefix tree to store candidate k-itemsets

    public void setup(Context context) throws IOException, InterruptedException
    {
        conf = context.getConfiguration();
        URI[] previousOutputURIs = Job.getInstance(conf).getCacheFiles();
        k = conf.getInt("k", k);
        trieLk_1 = new ItemsetTrie();

        for (URI previousOutputURI : previousOutputURIs)
        {
            Path previousOutputPath = new Path(previousOutputURI.getPath());
            String previousOutputFileName = previousOutputPath.getName().toString();
            filterItemset(previousOutputFileName, trieLk_1);
        }
    //  trieCk = aprioriGenK(trieLk_1, k-1);    // candidate generation from prefix tree of size k-1
    }// end method setup

    //trim count from each line and store only itemset
    private void filterItemset(String fileName, ItemsetTrie trieLk_1)
    {
        try 
        {
          BufferedReader fis = new BufferedReader(new FileReader(fileName));
          String line = null;
        //  trieLk_1 = new ItemsetTrie();

          while ((line = fis.readLine()) != null)
          {
              fitemset = new StringTokenizer(line, "\t");
              trieLk_1.insertCandidateItemset(fitemset.nextToken());
          }
          fis.close();
        }
        catch (IOException ioe)
        {
          System.err.println("Caught exception while parsing the cached file '" + fileName + "' : " + StringUtils.stringifyException(ioe));
        }
    }// end method filterItemset

    public void map(Object key, Text value, Context context) throws IOException, InterruptedException 
    {
        StringTokenizer items = new StringTokenizer(value.toString().toLowerCase()," \t\n\r\f,.:;?![]'"); // tokenize transaction
        LinkedList<String> itemlist = new LinkedList<String>(); // store the tokens or items of the transaction

        LinkedList <String>listCt;      // list of subset of transaction that are candidates
    //  Map <String, Integer>mapCt;     // list of subset of transaction that are candidates with support count
        ItemsetTrie trieCk = null;          // prefix tree to store candidate k-itemsets
        StringTokenizer candidate;

        trieCk = aprioriGenK(trieLk_1, k-1);        // candidate generation from prefix tree of size k-1

        if(trieCk.numberOfCandidate() > 0)
            context.getCounter(State.UPDATED).increment(1);     // increment counter

        // optimization: if transaction size is less than candidate size then it should not be checked
        if(items.countTokens() >= k)
        {
            while (items.hasMoreTokens())               // add tokens of transaction to list
                itemlist.add(items.nextToken());

            // we use either simple linkedlist listCt or map mapCt
            listCt = trieCk.candidateSupportCount1(itemlist, k);
            for(String listCtMember : listCt)   // generate (key, value) pair. work on listCt
            {
                candidate = new StringTokenizer(listCtMember, "\n");
                if(candidate.hasMoreTokens())
                {
                    itemset.set(candidate.nextToken()); context.write(itemset, one);
                }
            }
        } // end if
    } // end method map

    // generating candidate prefix tree of size k using prefix tree of size k-1
    public ItemsetTrie aprioriGenK(ItemsetTrie trieLk_1, int itemsetSize)   // itemsetSize of trie Lk_1
    {
        ItemsetTrie candidateTree = new ItemsetTrie();      // local prefix tree store candidates k-itemsets
        trieLk_1.candidateGenK(candidateTree, itemsetSize); // new candidate prefix tree obtained
        return candidateTree;                               // return prefix tree of size k
    } // end method aprioriGenK
} // end class AprioriTrieMapper
Here is my driver class:

public class AprioriTrie
{
private static Logger log = Logger.getLogger(AprioriTrie.class);

public static void main(String[] args) throws Exception
{
    Configuration conf = new Configuration();

//  String minsup = "1";
    String minsup = null;
    List<String> otherArgs = new ArrayList<String>();
    for (int i=0; i < args.length; ++i)
    {
        if ("-minsup".equals(args[i]))
            minsup = args[++i];
        else
            otherArgs.add(args[i]);
    }

    conf.set("min_sup", minsup);

    log.info("Started counting 1-itemset ....................");
    Date date; long startTime, endTime;                         // for recording start and end time of job
    date = new Date(); startTime = date.getTime();              // starting timer

    // Phase-1
    Job job = Job.getInstance(conf, "AprioriTrie: Iteration-1");
    job.setJarByClass(aprioriBasedAlgorithms.AprioriTrie.class);

    job.setMapperClass(OneItemsetMapper.class);
    job.setCombinerClass(OneItemsetCombiner.class);
    job.setReducerClass(OneItemsetReducer.class);

//  job.setOutputKeyClass(Text.class);
    job.setOutputKeyClass(IntWritable.class);
    job.setOutputValueClass(IntWritable.class);

    job.setInputFormatClass(NLineInputFormat.class);
    NLineInputFormat.setNumLinesPerSplit(job, 10000);   // set a specific number of lines per input split

//  Path inputPath = new Path("hdfs://hadoopmaster:9000/user/hduser/sample-transactions1/");
    Path inputPath = new Path(otherArgs.get(0));
//  Path outputPath = new Path("hdfs://hadoopmaster:9000/user/hduser/AprioriTrie/fis-1");
    Path outputPath = new Path(otherArgs.get(1)+"/fis-1");

    FileInputFormat.setInputPaths(job, inputPath);
    FileOutputFormat.setOutputPath(job, outputPath);            

    if(job.waitForCompletion(true))
        log.info("SUCCESSFULLY- Completed Frequent 1-itemsets Geneation.");
    else
        log.info("ERROR- Completed Frequent 1-itemsets Geneation.");

    // Phase-k >=2
    int iteration = 1; long counter;
    do
    {
        Configuration conf2 = new Configuration();
        conf2.set("min_sup", minsup);
        conf2.setInt("k", iteration+1);

        log.info("Started counting "+(iteration+1)+"-itemsets ..................");
        Job job2 = Job.getInstance(conf2, "AprioriTrie: Iteration-"+(iteration+1));
        job2.setJarByClass(aprioriBasedAlgorithms.AprioriTrie.class);

        job2.setMapperClass(AprioriTrieMapper.class);
        job2.setCombinerClass(ItemsetCombiner.class);
        job2.setReducerClass(ItemsetReducer.class);

        job2.setOutputKeyClass(Text.class);
        job2.setOutputValueClass(IntWritable.class);

        job2.setNumReduceTasks(4); // split the output into 4 files

        job2.setInputFormatClass(NLineInputFormat.class);
        NLineInputFormat.setNumLinesPerSplit(job2, 10000);

        FileSystem fs = FileSystem.get(new URI("hdfs://hadoopmaster:9000"), conf2);
    //  FileStatus[] status = fs.listStatus(new Path("hdfs://hadoopmaster:9000/user/hduser/AprioriTrie/fis-"+iteration+"/"));
        FileStatus[] status = fs.listStatus(new Path(otherArgs.get(1)+"/fis-"+iteration));
        for (int i=0;i<status.length;i++)
        {
            job2.addCacheFile(status[i].getPath().toUri()); // add all files inside output fis
            //job2.addFileToClassPath(status[i].getPath());
        }

    //  input is the same for these jobs
    //  outputPath = new Path("hdfs://hadoopmaster:9000/user/hduser/AprioriTrie/fis-"+(iteration+1));
        outputPath = new Path(otherArgs.get(1)+"/fis-"+(iteration+1));

        FileInputFormat.setInputPaths(job2, inputPath);
        FileOutputFormat.setOutputPath(job2, outputPath);

        if(job2.waitForCompletion(true))
            log.info("SUCCESSFULLY- Completed Frequent "+(iteration+1)+"-itemsets Generation.");
        else
            log.info("ERROR- Completed Frequent "+(iteration+1)+"-itemsets Generation.");

        iteration++;
        counter = job2.getCounters().findCounter(AprioriTrieMapper.State.UPDATED).getValue();
    } while (counter > 0);

    date = new Date(); endTime = date.getTime();                    //end timer
    log.info("Total Time (in milliseconds) = "+ (endTime-startTime));
    log.info("Total Time (in seconds) = "+ (endTime-startTime)*0.001F);
}
} // end class AprioriTrie
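
One way to run some work exactly once per mapper task is to override the Mapper's run() method. The small example below (Mymapper and its myfunc() helper are purely illustrative) performs the one-time call after setup() and before the record loop:
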
import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class Mymapper extends Mapper<LongWritable,Text,Text,IntWritable>
{
    public void map(LongWritable key,Text value,Context context) throws IOException,InterruptedException
    {
               //do something

    }
    public void myfunc(String parm)
    {
        System.out.println("parm="+parm);
    }
    public void run(Context context) throws IOException, InterruptedException 
    {
        setup(context);
        myfunc("hello");
        while(context.nextKeyValue())
        {
            map(context.getCurrentKey(), context.getCurrentValue(), context);
        }
        cleanup(context);   // mirror the default run(): call cleanup() after the record loop
    }

}
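
Overriding run() works because Hadoop invokes run() exactly once per mapper task: the default implementation calls setup(), then map() for every record in the input split, and finally cleanup(). Anything placed between setup() and the record loop therefore executes once per task instead of once per record. Applying the same idea, AprioriTrieMapper can be rewritten to build the candidate trie once in run():
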
public class AprioriTrieMapper extends Mapper<Object, Text, Text, IntWritable>
{
public static enum State
{
    UPDATED
}

private final static IntWritable one = new IntWritable(1);
private Text itemset = new Text();

private Configuration conf;
private StringTokenizer fitemset;   // store one line of previous output file of frequent itemsets
private ItemsetTrie trieLk_1 = null;    // prefix tree to store candidate (k-1)-itemsets of previous pass
private int k;                      // itemsetSize or iteration no.
private ItemsetTrie trieCk = null;          // prefix tree to store candidate k-itemsets

public void setup(Context context) throws IOException, InterruptedException
{
    conf = context.getConfiguration();
    URI[] previousOutputURIs = Job.getInstance(conf).getCacheFiles();
    k = conf.getInt("k", k);
    trieLk_1 = new ItemsetTrie();

    for (URI previousOutputURI : previousOutputURIs)
    {
        Path previousOutputPath = new Path(previousOutputURI.getPath());
        String previousOutputFileName = previousOutputPath.getName().toString();
        filterItemset(previousOutputFileName, trieLk_1);
    }
//  trieCk = aprioriGenK(trieLk_1, k-1);    // candidate generation from prefix tree of size k-1
}// end method setup

//trim count from each line and store only itemset
private void filterItemset(String fileName, ItemsetTrie trieLk_1)
{
    try 
    {
      BufferedReader fis = new BufferedReader(new FileReader(fileName));
      String line = null;
    //  trieLk_1 = new ItemsetTrie();

      while ((line = fis.readLine()) != null)
      {
          fitemset = new StringTokenizer(line, "\t");
          trieLk_1.insertCandidateItemset(fitemset.nextToken());
      }
      fis.close();
    }
    catch (IOException ioe)
    {
      System.err.println("Caught exception while parsing the cached file '" + fileName + "' : " + StringUtils.stringifyException(ioe));
    }
}// end method filterItemset

//run method
public void run(Context context) throws IOException, InterruptedException
{
    setup(context);
    trieCk = aprioriGenK(trieLk_1, k-1);    // candidate generation from prefix tree of size k-1

    if(trieCk.numberOfCandidate() > 0)
        context.getCounter(State.UPDATED).increment(1);     // increment counter

    while(context.nextKeyValue())
    {
        map(context.getCurrentKey(), context.getCurrentValue(), context);
    }
    cleanup(context);   // mirror the default run(): call cleanup() after the record loop
}// end method run

public void map(Object key, Text value, Context context) throws IOException, InterruptedException 
{
    StringTokenizer items = new StringTokenizer(value.toString().toLowerCase()," \t\n\r\f,.:;?![]'"); // tokenize transaction
    LinkedList<String> itemlist = new LinkedList<String>(); // store the tokens or items of the transaction

    LinkedList <String>listCt;      // list of subset of transaction that are candidates
//  Map <String, Integer>mapCt;     // list of subset of transaction that are candidates with support count
//  ItemsetTrie trieCk = null;          // prefix tree to store candidate k-itemsets
    StringTokenizer candidate;

//  if(context.getCounter(State.UPDATED).getValue() == 0)
//  {
//      trieCk = aprioriGenK(trieLk_1, k-1);    // candidate generation from prefix tree of size k-1

    //  if(trieCk.numberOfCandidate() > 0)
        //  context.getCounter(State.UPDATED).increment(1);     // increment counter
//  }

    // optimization: if transaction size is less than candidate size then it should not be checked
    if(items.countTokens() >= k)
    {
        while (items.hasMoreTokens())               // add tokens of transaction to list
            itemlist.add(items.nextToken());

        // we use either simple linkedlist listCt or map mapCt
        listCt = trieCk.candidateSupportCount1(itemlist, k);
        for(String listCtMember : listCt)   // generate (key, value) pair. work on listCt
        {
            candidate = new StringTokenizer(listCtMember, "\n");
            if(candidate.hasMoreTokens())
            {
                itemset.set(candidate.nextToken()); context.write(itemset, one);
            }
        }
    } // end if
} // end method map

// generating candidate prefix tree of size k using prefix tree of size k-1
public ItemsetTrie aprioriGenK(ItemsetTrie trieLk_1, int itemsetSize)   // itemsetSize of trie Lk_1
{
    ItemsetTrie candidateTree = new ItemsetTrie();      // local prefix tree store candidates k-itemsets
    trieLk_1.candidateGenK(candidateTree, itemsetSize); // new candidate prefix tree obtained
    return candidateTree;                               // return prefix tree of size k
} // end method aprioriGenK
} // end class AprioriTrieMapper