
Java: cannot resolve org.apache.commons.logging.Log


When I try to declare a byte array with

private byte[] startTag

Eclipse marks the line as an error. Hovering over it, I get the following message:

The type org.apache.commons.logging.Log cannot be resolved. It is indirectly referenced from required .class files

Following other answers, I tried adding jar files to the classpath, but the error does not go away. What should I do now?

If any specific jar file needs to be added, please say which one.
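For context, this particular message generally indicates that commons-logging itself is missing from the project's build path, rather than anything being wrong with the byte-array declaration. A minimal sketch to check the resolution, assuming a commons-logging jar (for example commons-logging-1.x.jar) has been added to the Eclipse build path; the LogCheck class name is made up for illustration:

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class LogCheck {
    // This line compiles only when commons-logging is on the build path.
    private static final Log LOG = LogFactory.getLog(LogCheck.class);

    public static void main(String[] args) {
        LOG.info("org.apache.commons.logging.Log resolved");
    }
}

The code in question follows: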

import java.io.IOException;
import java.util.List;  
import org.apache.hadoop.fs.BlockLocation;  
import org.apache.hadoop.fs.FSDataInputStream;  
import org.apache.hadoop.fs.FileStatus;  
import org.apache.hadoop.fs.FileSystem;  
import org.apache.hadoop.fs.Path;  
import org.apache.hadoop.io.DataOutputBuffer;  
import org.apache.hadoop.io.LongWritable;  
import org.apache.hadoop.io.Text;  
import org.apache.hadoop.mapreduce.InputSplit;  
import org.apache.hadoop.mapreduce.JobContext;  
import org.apache.hadoop.mapreduce.RecordReader;  
import org.apache.hadoop.mapreduce.TaskAttemptContext;  
import org.apache.hadoop.mapreduce.lib.input.FileSplit;  
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;

public class XmlInputFormat extends TextInputFormat {

    public static final String START_TAG_KEY = "< student>";
    public static final String END_TAG_KEY = "</student>";

    @Override
    public RecordReader<LongWritable, Text> createRecordReader(
       InputSplit split, TaskAttemptContext context) {
       return new XmlRecordReader();
    }

    public static class XmlRecordReader extends 
    RecordReader<LongWritable, Text> {
        private byte[] startTag;
        private byte[] endTag;
        private long start;
        private long end;
        private FSDataInputStream fsin;
        private DataOutputBuffer buffer = new DataOutputBuffer();
        private LongWritable key = new LongWritable();
        private Text value = new Text();

        @Override
        public void initialize(InputSplit is, TaskAttemptContext tac)
        throws IOException, InterruptedException {
            FileSplit fileSplit = (FileSplit) is;
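            // Note: these locals shadow the class-level START_TAG_KEY and
            // END_TAG_KEY constants, so the "<student>" values above are
            // never used by this reader.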
            String START_TAG_KEY = "<employee>";
            String END_TAG_KEY = "</employee>";
            startTag = START_TAG_KEY.getBytes("utf-8");
            endTag = END_TAG_KEY.getBytes("utf-8");

            start = fileSplit.getStart();
            end = start + fileSplit.getLength();
            Path file = fileSplit.getPath();

            FileSystem fs = file.getFileSystem(tac.getConfiguration());
            fsin = fs.open(fileSplit.getPath());
            fsin.seek(start);

        }

        @Override
        public boolean nextKeyValue() throws IOException, InterruptedException {
            if (fsin.getPos() < end) {
                if (readUntilMatch(startTag, false)) {
                    try {
                        buffer.write(startTag);
                        if (readUntilMatch(endTag, true)) {

                            value.set(buffer.getData(), 0, buffer.getLength());
                            key.set(fsin.getPos());
                            return true;
                        }
                    } finally {
                        buffer.reset();
                    }
                }
            }
            return false;
        }

        @Override
        public LongWritable getCurrentKey() throws IOException,
        InterruptedException {
            return key;
        }

        @Override
        public Text getCurrentValue() throws IOException,       
          InterruptedException {
            return value;

        }

        @Override
        public float getProgress() throws IOException, 
          InterruptedException {
            return (fsin.getPos() - start) / (float) (end - start);
        }

        @Override
        public void close() throws IOException {
            fsin.close();
        }

        private boolean readUntilMatch(byte[] match, boolean withinBlock) throws IOException {
            int i = 0;
            while (true) {
                int b = fsin.read();

                if (b == -1)
                    return false;

                if (withinBlock)
                    buffer.write(b);

                if (b == match[i]) {
                    i++;
                    if (i >= match.length)
                        return true;
                } else
                    i = 0;

                if (!withinBlock && i == 0 && fsin.getPos() >= end)
                    return false;
            }
        }

    }

}
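
For reference, a minimal driver sketch showing how an input format like this is typically wired into a job. This assumes the Hadoop 2.x mapreduce API; the XmlDriver class name and the input/output path arguments are hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class XmlDriver {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "xml-records");
        job.setJarByClass(XmlDriver.class);

        // Each record handed to the mapper is one whole tag-delimited block.
        job.setInputFormatClass(XmlInputFormat.class);
        job.setOutputKeyClass(LongWritable.class);
        job.setOutputValueClass(Text.class);

        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}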
The problem is most likely in earlier context, in lines you have not provided. If you want help, please show more of it. Can you add the whole code?