
Apache Spark: trying to generate logs in Drools using KieRuntimeLogger


This is my previous post, where I was able to generate logs using "KieRuntimeLogger". Now I am trying the same thing with another example, in which I previously used print statements to show the output on screen. When I run my code locally it works fine, but when I run it in threaded mode (the application running on Spark) the print statements do not show up. So I want to create logs instead, to track which rules in the .drl file are hit for which object. I was able to do that in the earlier example (the link above), but when I try it in this example I get an error.
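For context, a minimal sketch of how a KieRuntimeLogger is typically attached to a KieSession and flushed; the class name, the inserted fact and the log path below are placeholders, not values taken from the project in question:

import org.kie.api.KieServices;
import org.kie.api.logger.KieRuntimeLogger;
import org.kie.api.runtime.KieContainer;
import org.kie.api.runtime.KieSession;

public class AuditLogExample {
    public static void main(String[] args) {
        KieServices ks = KieServices.Factory.get();
        KieContainer kContainer = ks.getKieClasspathContainer();
        // "ksession-rule" must match a <ksession> name declared in kmodule.xml
        KieSession ksession = kContainer.newKieSession("ksession-rule");
        // writes an audit log recording which rules fired against which facts
        KieRuntimeLogger logger = ks.getLoggers().newFileLogger(ksession, "/tmp/drools-audit");
        try {
            ksession.insert(new Object()); // placeholder fact
            ksession.fireAllRules();
        } finally {
            logger.close();    // the audit file is only completed once the logger is closed
            ksession.dispose();
        }
    }
}

The detail that is easy to miss is that the audit file is only flushed when logger.close() is called.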

RulesExecutor.java

package com.rsrit.cob.drools;

import java.io.BufferedWriter;
import java.io.OutputStreamWriter;
import java.io.Serializable;

import org.kie.api.KieServices;
import org.kie.api.logger.KieRuntimeLogger;
import org.kie.api.runtime.KieContainer;
import org.kie.api.runtime.KieSession;
import org.kie.api.runtime.StatelessKieSession;
import org.kie.internal.command.CommandFactory;

import com.rsrit.cob.Variables.ClaimInfo;
import com.rsrit.cob.drools.KieSessionFactory;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@SuppressWarnings("serial")
public class RulesExecutor implements Serializable{
    KieServices ks = KieServices.Factory.get();
    KieContainer kContainer = ks.getKieClasspathContainer();
    public static BufferedWriter log = new BufferedWriter(new OutputStreamWriter(System.out)); 
    @SuppressWarnings("unchecked")
    public ClaimInfo evalRules(ClaimInfo claimObj,String ruleFileLoc){
        if (ruleFileLoc != null){

        //StatelessKieSession ksession = KieSessionFactory.getKieSession(ruleFileLoc);
        KieSession ksession = kContainer.newKieSession("ksession-rule");

        KieRuntimeLogger logger = KieServices.Factory.get().getLoggers().newFileLogger(ksession,"C://Users/katuk/eclipse-workspace/test");
        //ksession.insert("claim id");
        ksession.execute(CommandFactory.newInsert(claimObj));
        }else{
            try{
            log.write("Rules File Location is Invalid or Null\n");
             log.flush();
            }catch(Exception e){
                e.printStackTrace();
            }
        }
        return claimObj;
    }
    /*public static String ruleFileConnection(String _ruleFileLoc){

        try{
            String rulesPath = _ruleFileLoc;
            ClassLoader loader =Thread.currentThread().getContextClassLoader();
            Properties props = new Properties();
            try(InputStream rulesLocStream = loader.getResourceAsStream(rulesPath)){
                props.load(rulesLocStream);
            }
            return props.getProperty("ruleFileLoc");

        } catch (FileNotFoundException ex) {
            return null;
        } catch (IOException ex) {
            return null;
        }
    }*/

}
kmodule.xml

<?xml version="1.0" encoding="UTF-8"?>
<kmodule xmlns="http://jboss.org/kie/7.1.0.Final/kmodule">
  <kbase name="KBase" default="true" packages="com.rsrit.cob">
    <ksession name="KSession" type="stateless" default="true" />
  </kbase>
</kmodule>
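As a side note on the error that appears later in the output ("Unable to build index of kmodule.xml ... XSD validation failed"): the kmodule namespace documented for current Drools releases is the unversioned http://www.drools.org/xsd/kmodule rather than a version-specific jboss.org URL, so an equivalent descriptor would look roughly like this (same kbase/ksession names, only the namespace changed):

<?xml version="1.0" encoding="UTF-8"?>
<kmodule xmlns="http://www.drools.org/xsd/kmodule">
  <kbase name="KBase" default="true" packages="com.rsrit.cob">
    <ksession name="KSession" type="stateless" default="true" />
  </kbase>
</kmodule>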

RecoverableClaimsMain.java

package com.rsrit.cob.application;

import com.rsrit.cob.drools.*;
import com.rsrit.cob.Variables.*;
import com.rsrit.cob.dataSource.*;

import java.io.BufferedReader;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.List;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.broadcast.Broadcast;
import org.apache.spark.sql.SparkSession;

public class RecoverableClaimsMain {

    public static void main(String[] args){
        // TODO Auto-generated method stub

        List<ClaimInfo> inputData = new ArrayList<ClaimInfo>();

        PrimaryClaimInfo primaryObj;

        // InputStreamReader bReader = new InputStreamReader(new
        // FileInputStream(new File(args[0])));
        BufferedReader bReader = null;
        try {
            bReader = new BufferedReader(new FileReader(args[0]));

            String currentLine;
            bReader.readLine();

            while ((currentLine = bReader.readLine()) != null) {

                ParseClaimsFromTextFile textfile = new ParseClaimsFromTextFile(currentLine);
                //System.out.println("testing the current line "+currentLine);

                /*
                 * After Initializing all the variables, we have to create a
                 * substring that only takes the Last 3 characters of the
                 * membership Id. we are considering that a particular
                 * membership is primary or secondary is determined by the last
                 * 3 digits like if the membership id is ending with -01 or -02
                 * then we that member to be secondary, else we are considering
                 * all the members to be primary
                 * 
                 */

                String subStg_MBSID = textfile.getMembership_ID().substring(textfile.getMembership_ID().length() - 3);
                /*
                 * Now we are creating an if-else statement to do the specific
                 * operations on Primary Members and secondary Members
                 */
                if (subStg_MBSID.equals("-01") || subStg_MBSID.equals("-02")) {
                    /*
                     * if the membership of the record is secondary then we call
                     * a method which creates a new object for linking the
                     * primary Member.
                     */

                    //System.out.println(textfile.getMembership_ID());
                    primaryObj = textfile.primary_Member_Info(textfile.getMembership_ID(), textfile.getLine_Date_Tst(),
                            textfile.getClaim_id(), args[1]);
                    /*
                     * After creating the Object for Primary Member , we call
                     * create new object which take the previous primary member
                     * object as one of its arguments and add that object to our
                     * ArrayList.
                     */

                    if (primaryObj != null) {
                        inputData.add(new ClaimInfo(textfile.getClaim_id(), textfile.getLineNum(),
                                textfile.getPrimacy_value(), textfile.getProcedure_covered(),
                                textfile.getProvider_Zipcode(), textfile.getPart_Type(),
                                textfile.getEvent_Names().split(","), textfile.getMbs_Type(),
                                textfile.getEmployer_Size_Aged(), textfile.getEmployer_Size_Disabled(),
                                textfile.getLine_Date_Tst(), textfile.getEvent_Start_Dates().split(","),
                                textfile.getEvent_End_Dates().split(","), textfile.currentTime(),
                                textfile.previousYearTime(), textfile.getPrimary_Memeber(),
                                textfile.getPsn_First_Name(), textfile.getPsn_Last_Name(), textfile.getCharge_amount(),
                                textfile.getNet_Charged_Amt(), textfile.getMembership_ID(), primaryObj, subStg_MBSID,
                                textfile.getProvider_Name(), textfile.getAge(), textfile.getGender()));

                        System.out.println("claims from secondary members: "+textfile.getMembership_ID());
                    } else {

                        System.out.println("Not Enough valid Information of the Primary Member of the Claim "+textfile.getClaim_id());
                    }
                } else {

                    /*
                     * Lets say if the member of the record is primary then we
                     * can directly create an Object using all the variables
                     * that we have created from the columns of the result-set
                     * and we add that object to our ArrayList
                     */

                    inputData.add(new ClaimInfo(textfile.getClaim_id(), textfile.getLineNum(),
                            textfile.getPrimacy_value(), textfile.getProcedure_covered(),
                            textfile.getProvider_Zipcode(), textfile.getPart_Type(),
                            textfile.getEvent_Names().split(","), textfile.getMbs_Type(),
                            textfile.getEmployer_Size_Aged(), textfile.getEmployer_Size_Disabled(),
                            textfile.getLine_Date_Tst(), textfile.getEvent_Start_Dates().split(","),
                            textfile.getEvent_End_Dates().split(","), textfile.currentTime(),
                            textfile.previousYearTime(), textfile.getPrimary_Memeber(), textfile.getPsn_First_Name(),
                            textfile.getPsn_Last_Name(), textfile.getCharge_amount(), textfile.getNet_Charged_Amt(),
                            textfile.getMembership_ID(), subStg_MBSID, textfile.getProvider_Name(), textfile.getAge(),
                            textfile.getGender()));

                }

            }
        } catch (FileNotFoundException ex) {
            ex.printStackTrace();
        } catch (IOException ex) {
            ex.printStackTrace();
        } catch (Exception ex) {
            ex.printStackTrace();
        } finally {
            try {
                bReader.close();
            } catch (IOException e) {
                // TODO Auto-generated catch block
                e.printStackTrace();
            }
        }

        /*
         * Create a SparkSession with master set to "local", since we are not
         * running the application on a multi-node cluster (for now), and then
         * create a new JavaSparkContext from that SparkSession.
         */

        SparkSession spark = SparkSession.builder().appName("Phase-one Test").master("local").getOrCreate();

        JavaSparkContext sc = new JavaSparkContext(spark.sparkContext());

        /*
         * Now start a KieSession by creating a new RulesExecutor object.
         */
        RulesExecutor rulesExecutor = new RulesExecutor();

        /*
         * Now broadcast the RulesExecutor (and with it the Kie session setup) to the
         * worker nodes, in case the application needs to run on a multi-node cluster
         * in the future.
         */
        Broadcast<RulesExecutor> broadcastRules = sc.broadcast(rulesExecutor);

        JavaRDD<ClaimInfo> claims = sc.parallelize(inputData);

        System.out.println(claims.count());
        /*
         * After creating the JavaRDD, apply the map function with the evalRules()
         * method, passing the broadcast rules and the ClaimInfo object as input
         * arguments. After the map we can count the number of recoverable claims
         * by filtering the objects whose recoverable flag is TRUE.
         */
        JavaRDD<ClaimInfo> appliedClaims = claims
                .map(mainclaims -> broadcastRules.value().evalRules(mainclaims, args[2]));

        JavaRDD<ClaimInfo> recoveredClaims = appliedClaims.filter(mainClaims -> mainClaims.isRecoverable());

        System.out.println("Number of claims recovered: " + recoveredClaims.count());


        sc.close();

        spark.close();
    }
}
Using Spark's default log4j profile: org/apache/spark/log4j-defaults.properties
18/08/20 11:46:43 INFO SparkContext: Running Spark version 2.3.0
18/08/20 11:46:43 WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
18/08/20 11:46:43 INFO SparkContext: Submitted application: Phase-one Test
18/08/20 11:46:44 INFO SecurityManager: Changing view acls to: katuk
18/08/20 11:46:44 INFO SecurityManager: Changing modify acls to: katuk
18/08/20 11:46:44 INFO SecurityManager: Changing view acls groups to: 
18/08/20 11:46:44 INFO SecurityManager: Changing modify acls groups to: 
18/08/20 11:46:44 INFO SecurityManager: SecurityManager: authentication disabled; ui acls disabled; users  with view permissions: Set(katuk); groups with view permissions: Set(); users  with modify permissions: Set(katuk); groups with modify permissions: Set()
18/08/20 11:46:45 INFO Utils: Successfully started service 'sparkDriver' on port 53222.
18/08/20 11:46:45 INFO SparkEnv: Registering MapOutputTracker
18/08/20 11:46:45 INFO SparkEnv: Registering BlockManagerMaster
18/08/20 11:46:45 INFO BlockManagerMasterEndpoint: Using org.apache.spark.storage.DefaultTopologyMapper for getting topology information
18/08/20 11:46:45 INFO BlockManagerMasterEndpoint: BlockManagerMasterEndpoint up
18/08/20 11:46:45 INFO DiskBlockManager: Created local directory at C:\Users\katuk\AppData\Local\Temp\blockmgr-11105d7d-3dc9-412b-959a-8a917e887744
18/08/20 11:46:45 INFO MemoryStore: MemoryStore started with capacity 891.0 MB
18/08/20 11:46:45 INFO SparkEnv: Registering OutputCommitCoordinator
18/08/20 11:46:45 INFO Utils: Successfully started service 'SparkUI' on port 4040.
18/08/20 11:46:45 INFO SparkUI: Bound SparkUI to 0.0.0.0, and started at http://WINDOWS-2G24VKQ:4040
18/08/20 11:46:45 INFO Executor: Starting executor ID driver on host localhost
18/08/20 11:46:45 INFO Utils: Successfully started service 'org.apache.spark.network.netty.NettyBlockTransferService' on port 53235.
18/08/20 11:46:45 INFO NettyBlockTransferService: Server created on WINDOWS-2G24VKQ:53235
18/08/20 11:46:45 INFO BlockManager: Using org.apache.spark.storage.RandomBlockReplicationPolicy for block replication policy
18/08/20 11:46:45 INFO BlockManagerMaster: Registering BlockManager BlockManagerId(driver, WINDOWS-2G24VKQ, 53235, None)
18/08/20 11:46:45 INFO BlockManagerMasterEndpoint: Registering block manager WINDOWS-2G24VKQ:53235 with 891.0 MB RAM, BlockManagerId(driver, WINDOWS-2G24VKQ, 53235, None)
18/08/20 11:46:45 INFO BlockManagerMaster: Registered BlockManager BlockManagerId(driver, WINDOWS-2G24VKQ, 53235, None)
18/08/20 11:46:45 INFO BlockManager: Initialized BlockManager: BlockManagerId(driver, WINDOWS-2G24VKQ, 53235, None)
18/08/20 11:46:46 INFO ClasspathKieProject: Found kmodule: file:/D:/HealthCare/Molina/ExecutableJarFileWithResources/molinaHealthcare/target/classes/META-INF/kmodule.xml
18/08/20 11:46:46 ERROR ClasspathKieProject: Unable to build index of kmodule.xml url=file:/D:/HealthCare/Molina/ExecutableJarFileWithResources/molinaHealthcare/target/classes/META-INF/kmodule.xml
XSD validation failed against the new schema (cvc-elt.1: Cannot find the declaration of element 'kmodule'.) and against the old schema (cvc-elt.1: Cannot find the declaration of element 'kmodule'.).
18/08/20 11:46:46 INFO MemoryStore: Block broadcast_0 stored as values in memory (estimated size 1416.0 B, free 891.0 MB)
Exception in thread "main" java.io.NotSerializableException: org.drools.compiler.kie.builder.impl.KieContainerImpl
Serialization stack:
    - object not serializable (class: org.drools.compiler.kie.builder.impl.KieContainerImpl, value: org.drools.compiler.kie.builder.impl.KieContainerImpl@17814b1c)
    - field (class: com.rsrit.cob.drools.RulesExecutor, name: kContainer, type: interface org.kie.api.runtime.KieContainer)
    - object (class com.rsrit.cob.drools.RulesExecutor, com.rsrit.cob.drools.RulesExecutor@46d8f407)
    at org.apache.spark.serializer.SerializationDebugger$.improveException(SerializationDebugger.scala:40)
    at org.apache.spark.serializer.JavaSerializationStream.writeObject(JavaSerializer.scala:46)
    at org.apache.spark.broadcast.TorrentBroadcast$$anonfun$blockifyObject$2.apply(TorrentBroadcast.scala:291)
    at org.apache.spark.broadcast.TorrentBroadcast$$anonfun$blockifyObject$2.apply(TorrentBroadcast.scala:291)
    at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
    at org.apache.spark.broadcast.TorrentBroadcast$.blockifyObject(TorrentBroadcast.scala:292)
    at org.apache.spark.broadcast.TorrentBroadcast.writeBlocks(TorrentBroadcast.scala:127)
    at org.apache.spark.broadcast.TorrentBroadcast.<init>(TorrentBroadcast.scala:88)
    at org.apache.spark.broadcast.TorrentBroadcastFactory.newBroadcast(TorrentBroadcastFactory.scala:34)
    at org.apache.spark.broadcast.BroadcastManager.newBroadcast(BroadcastManager.scala:62)
    at org.apache.spark.SparkContext.broadcast(SparkContext.scala:1481)
    at org.apache.spark.api.java.JavaSparkContext.broadcast(JavaSparkContext.scala:650)
    at com.rsrit.cob.application.RecoverableClaimsMain.main(RecoverableClaimsMain.java:151)
18/08/20 11:46:46 INFO SparkContext: Invoking stop() from shutdown hook
18/08/20 11:46:46 INFO SparkUI: Stopped Spark web UI at http://WINDOWS-2G24VKQ:4040
18/08/20 11:46:46 INFO MapOutputTrackerMasterEndpoint: MapOutputTrackerMasterEndpoint stopped!
18/08/20 11:46:46 INFO MemoryStore: MemoryStore cleared
18/08/20 11:46:46 INFO BlockManager: BlockManager stopped
18/08/20 11:46:46 INFO BlockManagerMaster: BlockManagerMaster stopped
18/08/20 11:46:46 INFO OutputCommitCoordinator$OutputCommitCoordinatorEndpoint: OutputCommitCoordinator stopped!
18/08/20 11:46:46 INFO SparkContext: Successfully stopped SparkContext
18/08/20 11:46:46 INFO ShutdownHookManager: Shutdown hook called
18/08/20 11:46:46 INFO ShutdownHookManager: Deleting directory C:\Users\katuk\AppData\Local\Temp\spark-f12fc1c9-7cce-43e3-8a00-da2e753c49bd
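The NotSerializableException above points at the kContainer field of RulesExecutor: Spark tries to Java-serialize the whole RulesExecutor when it is broadcast, and KieContainerImpl is not serializable. A minimal sketch of one common workaround, assuming the classpath container can simply be rebuilt on each executor: the field is made transient and initialized lazily, so only the serializable parts of RulesExecutor are shipped. The audit log path is a placeholder and, on a real cluster, would be written per executor rather than on the driver:

import java.io.Serializable;

import org.kie.api.KieServices;
import org.kie.api.logger.KieRuntimeLogger;
import org.kie.api.runtime.KieContainer;
import org.kie.api.runtime.KieSession;

import com.rsrit.cob.Variables.ClaimInfo;

public class RulesExecutor implements Serializable {
    private static final long serialVersionUID = 1L;

    // transient: the non-serializable KieContainer is rebuilt on each executor
    // instead of being shipped inside the broadcast variable
    private transient KieContainer kContainer;

    private KieContainer container() {
        if (kContainer == null) {
            kContainer = KieServices.Factory.get().getKieClasspathContainer();
        }
        return kContainer;
    }

    public ClaimInfo evalRules(ClaimInfo claimObj, String ruleFileLoc) {
        // ruleFileLoc kept only for signature compatibility; not used in this sketch
        KieSession ksession = container().newKieSession("ksession-rule"); // same session name as in the question
        // placeholder path; each executor writes its own audit file locally
        KieRuntimeLogger logger = KieServices.Factory.get().getLoggers()
                .newFileLogger(ksession, "/tmp/drools-audit");
        try {
            ksession.insert(claimObj);
            ksession.fireAllRules();
        } finally {
            logger.close();
            ksession.dispose();
        }
        return claimObj;
    }
}

An alternative with the same effect would be to drop the broadcast entirely and construct the RulesExecutor inside the map function.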