Java: Lucene scoring on a multi-valued field, aware of the field value count


I am trying to associate a list of (multi-word) tags with each document. So for each document I add several StringField entries, all with "tag" as the field name.

When searching, I would like the score to be proportional to the ratio of tags that I successfully match, for example:

  • 0.5 if I match half of the tags
  • 1.0 if I match all of them
But it seems the number of tags is not taken into account in the score.

When testing with these four documents:

 - tags.put("doc1", "piano, electric guitar, violon");

 - tags.put("doc2", "piano, electric guitar");

 - tags.put("doc3", "piano");

 - tags.put("doc4", "electric guitar"); 
Here is what I get:

 - Score : 1.0 
 Doc : Document<stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<id:doc4> stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<tag:electric guitar>>
 - Score : 1.0 
 Doc : Document<stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<id:doc2> stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<tag:piano> stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<tag:electric guitar>>
 - Score : 1.0 
 Doc : Document<stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<id:doc1> stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<tag:piano> stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<tag:electric guitar> stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<tag:violon>>
How can I change this behavior? Am I missing the proper way to do this?

Below is my test code.

Best regards,

Renaud

public class LuceneQueryTest {

    Analyzer analyzer;
    BasicIndex basicIndex;
    LinkedList<String> phrases;
    Query query;
    Map<Document, Float> results;

    @Test
    public void testListOfTags() throws Exception {

        analyzer = new StandardAnalyzer();

        basicIndex = new BasicIndex(analyzer);

        Map<String, String> tags = new HashMap();

        tags.put("doc1", "piano, electric guitar, violon");

        tags.put("doc2", "piano, electric guitar");

        tags.put("doc3", "piano");

        tags.put("doc4", "electric guitar");

        Queue<String> queue = new LinkedList<>();
        queue.addAll(tags.keySet());

        basicIndex.index(new Supplier<Document>() {

            public Document get() {
                Document doc = new Document();

                if (queue.isEmpty()) {
                    return null;
                }

                String docName = queue.poll();

                System.out.println("**** "+docName);

                String tag = tags.get(docName);
                doc.add(new StringField("id", docName, Field.Store.YES));

                for (String tagItem : tag.split("\\,")) {
                    System.out.println(tagItem);
                    Field tagField;
                    tagField = new StringField("tag",tagItem,Field.Store.YES);

                    System.out.println(tagField);

                    doc.add(tagField);
                }
                return doc;
            }
        });

        BooleanQuery booleanQuery = new BooleanQuery();
        //booleanQuery.add(new TermQuery(new Term("tag", "piano")), BooleanClause.Occur.SHOULD);
        booleanQuery.add(new TermQuery(new Term("tag", "electric guitar")), BooleanClause.Occur.SHOULD);

        //Query parsedQuery = new QueryParser("tag", analyzer).parse("tag:\"electric guitar\"");
        query = booleanQuery;
        //query = parsedQuery;


        System.out.println(query);

        results = basicIndex.search(query);
        displayResults(results);

        System.out.println(Arrays.toString(basicIndex.document(3).getValues("tag")));

    }

    private void displayResults(Map<Document, Float> results) {
        results.forEach((Document doc, Float score) -> {
            System.out.println("Score : " + score + " \n Doc : " + doc);
        });
    }
}
And here is the code of the BasicIndex (test utility) class:

import java.io.IOException;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.Map;
import java.util.function.Function;
import java.util.function.Supplier;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopScoreDocCollector;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;

/**
 *
 * @author renaud
 */
public class BasicIndex {

final Directory directory = new RAMDirectory();
final IndexWriter indexWriter;
final Analyzer analyzer;

public BasicIndex(Analyzer analyzer) {
    this.analyzer = analyzer;
    this.indexWriter = newIndexWriter();
}

public Analyzer getAnalyzer() {
    return analyzer;
}

private IndexWriter newIndexWriter() {
    IndexWriterConfig config = new IndexWriterConfig(Version.LATEST, analyzer);
    try {
        return new IndexWriter(directory, config);
    } catch (IOException ex) {
        throw new RuntimeException(ex);
    }
}

public IndexSearcher newIndexSearcher() {
    return new IndexSearcher(newIndexReader());
}

public IndexReader newIndexReader() {
    IndexReader reader;
    try {

        reader = DirectoryReader.open(directory);
    } catch (IOException ex) {
        throw ExceptionUtils.asRuntimeException(ex);
    }
    return reader;
}

public void index(LinkedList<String> phrases, final String fieldName) {
    index(phrases, (String phrase) -> {
        Document doc = new Document();

        Field workField = new TextField(fieldName, phrase, Field.Store.YES);
        doc.add(workField);
        return doc;
    });
}

public void index(Supplier<Document> documents) {
    Document document;
    while ((document = documents.get()) != null) {
        try {
            indexWriter.addDocument(document);
        } catch (IOException e) {
            throw ExceptionUtils.asRuntimeException(e);
        }
    }
    close();
}

public void index(LinkedList<String> phrases, Function<String, Document> docBuilder) {
    for (String phrase : phrases) {
        try {
            indexWriter.addDocument(docBuilder.apply(phrase));
        } catch (IOException e) {
            throw ExceptionUtils.asRuntimeException(e);
        }
    }
    close();
}

private void close() {
    IOUtils.closeSilently(indexWriter);
}

public Map<Document, Float> search(Query query) {
    final IndexSearcher indexSearcher = newIndexSearcher();
    int hitsPerPage = 10;
    TopScoreDocCollector collector = TopScoreDocCollector.create(hitsPerPage, true);
    try {
        indexSearcher.search(query, collector);
    } catch (IOException ex) {
        throw new RuntimeException(ex);
    }

    ScoreDoc[] hits = collector.topDocs().scoreDocs;

    Map<Document, Float> results = new LinkedHashMap<>();
    for (int i = 0; i < hits.length; ++i) {
        ScoreDoc scoreDoc = hits[i];
        int docId = scoreDoc.doc;
        float score = scoreDoc.score;
        Document doc;
        try {
            doc = indexSearcher.doc(docId);
        } catch (IOException ex) {
            throw new RuntimeException(ex);
        }
        results.put(doc, score);
    }


    return results;
}

public Document document(int i){
    try {
        return newIndexSearcher().doc(i);
    } catch (IOException ex) {
        throw new RuntimeException(ex);
    }
}
}
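One way to make the score drop with the number of tags, sketched in the modified test further below: store the tag count in an extra count field at indexing time, then wrap the query in a BoostedQuery whose ValueSource is a ReciprocalFloatFunction over that field. With the (1, 1, 1) parameters used below, each hit's score is multiplied by 1 / (count + 1), which gives the following results:
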
Score : 0.5 
 Doc : Document<stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<id:doc4> stored<count:1> stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<tag:electric guitar>>
Score : 0.33333334 
 Doc : Document<stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<id:doc2> stored<count:2> stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<tag:piano> stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<tag:electric guitar>>
Score : 0.25 
 Doc : Document<stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<id:doc1> stored<count:3> stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<tag:piano> stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<tag:electric guitar> stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<tag:violon>>
    @Test
    public void testListOfTags() throws Exception {

        analyzer = new StandardAnalyzer();

        basicIndex = new BasicIndex(analyzer);

        Map<String, String> tags = new HashMap();

        tags.put("doc1", "piano, electric guitar, violon");

        tags.put("doc2", "piano, electric guitar");

        tags.put("doc3", "piano");

        tags.put("doc4", "electric guitar");

        Queue<String> queue = new LinkedList<>();
        queue.addAll(tags.keySet());

        basicIndex.index(new Supplier<Document>() {

            public Document get() {
                Document doc = new Document();

                if (queue.isEmpty()) {
                    return null;
                }

                String docName = queue.poll();

                System.out.println("**** " + docName);

                String tag = tags.get(docName);
                doc.add(new StringField("id", docName, Field.Store.YES));
                // store the number of tags so it can drive a function query at search time
                String[] tags = tag.split("\\,");

                Field tagCountField = new IntField("count", tags.length, Field.Store.YES);
                doc.add(tagCountField);

                for (String tagItem : tags) {
                    System.out.println(tagItem);
                    Field tagField;
                    tagField = new StringField("tag", tagItem.trim(), Field.Store.YES);

                    System.out.println(tagField);

                    doc.add(tagField);
                }
                return doc;
            }
        });

        BooleanQuery booleanQuery = new BooleanQuery();
        //booleanQuery.add(new TermQuery(new Term("tag", "piano")), BooleanClause.Occur.SHOULD);
        booleanQuery.add(new TermQuery(new Term("tag", "electric guitar")), BooleanClause.Occur.SHOULD);

        //Query parsedQuery = new QueryParser("tag", analyzer).parse("tag:\"electric guitar\"");
        query = booleanQuery;
        //query = parsedQuery;


        // multiply each hit's score by 1 / (count + 1) via a function query on the stored tag count
        ValueSource boostSource = new ReciprocalFloatFunction(new IntFieldSource("count"), 1, 1, 1);
        query = new BoostedQuery(query, boostSource);

        System.out.println(query);

        results = basicIndex.search(query);
        displayResults(results);

        System.out.println(Arrays.toString(basicIndex.document(3).getValues("tag")));

    }
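
Note that ReciprocalFloatFunction computes a / (m*x + b), so the (1, 1, 1) settings above penalize a document by its total tag count (1 / (count + 1)) rather than producing the exact matched/total ratio asked for in the question: doc4, whose single tag fully matches, scores 0.5 instead of 1.0. Tuning the m, a and b constants changes the shape of that penalty.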