Java: jar shows increased RAM usage after completing its task

I have a class KeywordCount that tokenizes a given sentence and tags it with the maxent POS tagger from Apache OpenNLP. I tokenize the output first and then feed it to the tagger. After the jar completes its task, I have a RAM usage problem of up to 165 MB. The rest of the program only makes DB calls and checks for new tasks, so I have isolated the leak to this class. You can safely ignore the Apache POI Excel code. I need to know whether any of you can spot the leak in the code.

import java.io.*;
import java.util.*;

import org.apache.poi.ss.usermodel.Cell;
import org.apache.poi.ss.usermodel.CellStyle;
import org.apache.poi.ss.usermodel.Font;
import org.apache.poi.ss.usermodel.IndexedColors;
import org.apache.poi.xssf.usermodel.*;

import opennlp.tools.postag.POSModel;
import opennlp.tools.postag.POSTaggerME;
import opennlp.tools.tokenize.Tokenizer;
import opennlp.tools.tokenize.TokenizerME;
import opennlp.tools.tokenize.TokenizerModel;

public class KeywordCount {
Task task;
String taskFolder = "";
List<String> listOfWords;

public KeywordCount(String taskFolder) {
    this.taskFolder = taskFolder;
    listOfWords = new ArrayList<String>();
}

public void tagText() throws Exception {
    String xlsxOutput = taskFolder + File.separator + "results_pe.xlsx";

    FileInputStream fis = new FileInputStream(new File(xlsxOutput));
    XSSFWorkbook wb = new XSSFWorkbook(fis);
    XSSFSheet sheet = wb.createSheet("Keyword Count");
    XSSFRow row = sheet.createRow(0);
    Cell cell = row.createCell(0);

    XSSFCellStyle csf = (XSSFCellStyle)wb.createCellStyle();
    csf.setVerticalAlignment(CellStyle.VERTICAL_TOP);
    csf.setBorderBottom(CellStyle.BORDER_THICK);
    csf.setBorderRight(CellStyle.BORDER_THICK);
    csf.setBorderTop(CellStyle.BORDER_THICK);
    csf.setBorderLeft(CellStyle.BORDER_THICK);
    Font fontf = wb.createFont();
    fontf.setColor(IndexedColors.GREEN.getIndex());
    fontf.setBoldweight(Font.BOLDWEIGHT_BOLD);
    csf.setFont(fontf);

    int rowNum = 0;
    BufferedReader br = null;
    InputStream modelIn = null;
    POSModel model = null;
    try {
      modelIn = new FileInputStream("taggers" + File.separator + "en-pos-maxent.bin");
      model = new POSModel(modelIn);
    }
    catch (IOException e) {
      // Model loading failed, handle the error
      e.printStackTrace();
    }
    finally {
      if (modelIn != null) {
        try {
          modelIn.close();
        }
        catch (IOException e) {
          // ignore failures while closing the model stream
        }
      }
    }
    File ftmp = new File(taskFolder + File.separator + "phrase_tmp.txt");
    if(ftmp.exists()) {
        br = new BufferedReader(new FileReader(ftmp));
        POSTaggerME tagger = new POSTaggerME(model);
        String line = "";
        while((line = br.readLine()) != null) {
            if (line.equals("")) {
                break;
            }
            row = sheet.createRow(rowNum++);
            if(line.startsWith("Match")) {
                int index = line.indexOf(":");
                line = line.substring(index + 1);
                String[] sent = getTokens(line);
                String[] tags = tagger.tag(sent); 
                // keep nouns (NN*) and adjectives (JJ*) for the keyword count
                for(int i = 0; i < tags.length; i++) {
                    if (tags[i].equals("NN") || tags[i].equals("NNP") || tags[i].equals("NNS") || tags[i].equals("NNPS")) {
                        listOfWords.add(sent[i].toLowerCase());
                    } else if (tags[i].equals("JJ") || tags[i].equals("JJR") || tags[i].equals("JJS")) {
                        listOfWords.add(sent[i].toLowerCase());
                    }
                }

                // count occurrences of each collected word
                Map<String, Integer> treeMap = new TreeMap<String, Integer>();
                for(String temp : listOfWords) {
                    Integer counter = treeMap.get(temp);
                    treeMap.put(temp, (counter == null) ? 1 : counter + 1);
                }
                listOfWords.clear();
                sent = null;
                tags = null;
                if (!treeMap.isEmpty()) {
                    for(Map.Entry<String, Integer> entry : treeMap.entrySet()) {
                        row = sheet.createRow(rowNum++);
                        cell = row.createCell(0);
                        cell.setCellValue(entry.getKey().substring(0, 1).toUpperCase() + entry.getKey().substring(1));
                        XSSFCell cell1 = row.createCell(1);
                        cell1.setCellValue(entry.getValue());
                    }
                    treeMap.clear();
                }
                treeMap = null;
            }
            rowNum++;
        }
        br.close();
        tagger = null;
        model = null;
    }
    sheet.autoSizeColumn(0);
    fis.close();

    FileOutputStream fos = new FileOutputStream(new File(xlsxOutput));
    wb.write(fos);
    fos.close();
    System.out.println("Finished writing XLSX file for Keyword Count!!");
}

public String[] getTokens(String match) throws Exception {
    // note: this opens and re-parses en-token.bin on every call
    InputStream modelIn = new FileInputStream("taggers" + File.separator + "en-token.bin");
    TokenizerModel model = null;
    try {
      model = new TokenizerModel(modelIn);
    }
    catch (IOException e) {
      e.printStackTrace();
    }
    finally {
      if (modelIn != null) {
        try {
          modelIn.close();
        }
        catch (IOException e) {
          // ignore failures while closing the model stream
        }
      }
    }

    Tokenizer tokenizer = new TokenizerME(model);
    String tokens[] = tokenizer.tokenize(match);
    model = null;

    return tokens;
}
}
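
One thing that stands out in the code itself: getTokens() reopens and re-parses en-token.bin on every call, once per matched line. Below is a minimal sketch of loading both OpenNLP models once and reusing them; it assumes the same file layout and is a refactoring sketch, not the original code:

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import opennlp.tools.postag.POSModel;
import opennlp.tools.postag.POSTaggerME;
import opennlp.tools.tokenize.TokenizerME;
import opennlp.tools.tokenize.TokenizerModel;

public class KeywordCount {
    private final String taskFolder;
    private final POSTaggerME tagger;     // POS model loaded once
    private final TokenizerME tokenizer;  // tokenizer model loaded once

    public KeywordCount(String taskFolder) throws IOException {
        this.taskFolder = taskFolder;
        try (InputStream posIn = new FileInputStream("taggers" + File.separator + "en-pos-maxent.bin");
             InputStream tokIn = new FileInputStream("taggers" + File.separator + "en-token.bin")) {
            tagger = new POSTaggerME(new POSModel(posIn));
            tokenizer = new TokenizerME(new TokenizerModel(tokIn));
        }
    }

    public String[] getTokens(String match) {
        return tokenizer.tokenize(match); // no per-call disk I/O or model parsing
    }
}

Whether or not this is the source of the reported growth, it removes the repeated disk I/O and model allocations from the hot loop.
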
My system GCs the RAM once it reaches 165 MB... but when I upload the jar to the server, no GC is performed and usage climbs to 480 MB (49% of RAM).
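
To confirm what the collector is actually doing on the server, GC logging can be enabled when the jar is launched. These are standard HotSpot flags; the jar name below is a placeholder:

java -verbose:gc -XX:+PrintGCDetails -jar yourapp.jar    (JDK 8)
java -Xlog:gc -jar yourapp.jar                           (JDK 9 and later)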

First of all, an increase in heap usage is not evidence of a memory leak. It may simply be that the GC has not run yet.
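
A quick, rough way to check whether the extra heap is reclaimable garbage rather than leaked objects is to compare used heap before and after requesting a collection. A minimal sketch; note that System.gc() is only a hint to the JVM, not a guarantee:

public class HeapCheck {
    public static void main(String[] args) throws InterruptedException {
        Runtime rt = Runtime.getRuntime();
        long before = rt.totalMemory() - rt.freeMemory(); // used heap in bytes
        System.gc();                                      // request, not force, a collection
        Thread.sleep(2000);                               // give the collector a moment
        long after = rt.totalMemory() - rt.freeMemory();
        System.out.printf("Used heap: %d MB -> %d MB%n", before >> 20, after >> 20);
    }
}

If the number drops back down after the collection, the memory was garbage awaiting collection, not a leak.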

Having said that, it is doubtful that anyone could spot a memory leak just by "eyeballing" your code. The right way to solve this is for *you* to hunt for it with the appropriate tools, such as a memory profiler.
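
For example, a heap dump can be captured and opened in a profiler such as VisualVM or Eclipse MAT to see which objects dominate the heap. A minimal, HotSpot-specific sketch; the output file name heap.hprof is arbitrary, and dumpHeap fails if the file already exists:

import java.lang.management.ManagementFactory;
import com.sun.management.HotSpotDiagnosticMXBean;

public class HeapDump {
    public static void main(String[] args) throws Exception {
        HotSpotDiagnosticMXBean bean = ManagementFactory.newPlatformMXBeanProxy(
                ManagementFactory.getPlatformMBeanServer(),
                "com.sun.management:type=HotSpotDiagnostic",
                HotSpotDiagnosticMXBean.class);
        bean.dumpHeap("heap.hprof", true); // true = dump only live (reachable) objects
    }
}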