Sending a Unity C# string to Watson Conversation/Assistant through the context


How do I send a string from Unity C# to the Watson Conversation service? I know it has to go through the context, but I don't know how to add the string to the context, or how to access the context to do that. I then need to send that context to the Watson Conversation service so I can use the string as the value of a context variable. Can anyone help me out, or explain how to send the context to Watson Conversation with my own Unity variables?

This is the script I have so far:

using System.Collections;
using System.Collections.Generic;
using UnityEngine;
using IBM.Watson.DeveloperCloud.Services.TextToSpeech.v1;
using IBM.Watson.DeveloperCloud.Services.Conversation.v1;
//using IBM.Watson.DeveloperCloud.Services.Assistant.v1;
using IBM.Watson.DeveloperCloud.Services.ToneAnalyzer.v3;
using IBM.Watson.DeveloperCloud.Services.SpeechToText.v1;
using IBM.Watson.DeveloperCloud.Logging;
using IBM.Watson.DeveloperCloud.Utilities;
using IBM.Watson.DeveloperCloud.Connection;
using IBM.Watson.DeveloperCloud.DataTypes;
using MiniJSON;
using UnityEngine.UI;
using FullSerializer;

public class WatsonAgent : MonoBehaviour {

public string literalEntityCity;
public Text DestinationField;
public Text DepartureField;
public Text DepartureTime;
public Text StayingPeriod;
public Text AdultsField;
public Text KidsField;
public Text RoomsField;
public Text TransportationField;
public string destinationCity;
public string departureCity;

[SerializeField]
private fsSerializer _serializer = new fsSerializer();

[System.Serializable]
public class CredentialInformation
{
    public string username, password, url;
}

[System.Serializable]
public class Services
{
    public CredentialInformation
        textToSpeech, 
        languageTranslator, 
        personality_insights, 
        conversation, 
        speechToText, 
        toneAnalyzer;
}

[Header("Credentials")]
[Space]
public Services
    serviceCredentials;

[Space]
[Header("Agent voice settings")]
[Space]
public AudioSource
    voiceSource;

public VoiceType
    voiceType;

[Space]
[Header("Conversation settings")]
[Space]
public string
    workspaceId;

[Space]
[Header("Feedback fields")]
[Space]
public Text
    speechToTextField;
public Text 
    conversationInputField;
public Text
    conversationOutputField;

[System.Serializable]
public class Emotion
{
    public string 
        emotionId;
    public float 
        power;
}

[Space]
[Header("Emotions (read only)")]
[Space]
public List<Emotion> 
    emotions = new List<Emotion>();

public enum SocialState
{
    idle, listening, thinking, talking
}

[Space]
[Header("Agent social behaviour (read only)")]
[Space]
public SocialState
    characterState;

// services
SpeechToText
    speechToText;

private int 
    recordingRoutine = 0,
    recordingBufferSize = 1,
    recordingHZ = 22050;

private string 
    microphoneID = null;

private AudioClip 
    recording = null;

TextToSpeech 
    textToSpeech;

Conversation
    conversation;

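// dialog state returned by Watson; it is sent back with every Message() call,
// and is also where custom context variables can be added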
private Dictionary<string, object> 
    conversationContext = null;

ToneAnalyzer
    toneAnalyzer;


private void Start()
{
    PrepareCredentials();
    Initialize();
}

void PrepareCredentials()
{
    speechToText = new SpeechToText(GetCredentials(serviceCredentials.speechToText));
    textToSpeech = new TextToSpeech(GetCredentials(serviceCredentials.textToSpeech));
    conversation = new Conversation(GetCredentials(serviceCredentials.conversation));
    toneAnalyzer = new ToneAnalyzer(GetCredentials(serviceCredentials.toneAnalyzer));
}

Credentials GetCredentials (CredentialInformation credentialInformation)
{
    return new Credentials(credentialInformation.username, credentialInformation.password, credentialInformation.url);
}

void Initialize ()
{
    conversation.VersionDate = "2017-05-26";
    toneAnalyzer.VersionDate = "2017-05-26";
    Active = true;
    StartRecording();
}

// speech to text
public bool Active
{
    get { return speechToText.IsListening; }
    set
    {
        if (value && !speechToText.IsListening)
        {
            speechToText.DetectSilence = true;
            speechToText.EnableWordConfidence = true;
            speechToText.EnableTimestamps = true;
            speechToText.SilenceThreshold = 0.01f;
            speechToText.MaxAlternatives = 0;
            speechToText.EnableInterimResults = true;
            speechToText.OnError = OnSpeechError;
            speechToText.InactivityTimeout = -1;
            speechToText.ProfanityFilter = false;
            speechToText.SmartFormatting = true;
            speechToText.SpeakerLabels = false;
            speechToText.WordAlternativesThreshold = null;
            speechToText.StartListening(OnSpeechRecognize);
        }
        else if (!value && speechToText.IsListening)
        {
            speechToText.StopListening();
        }
    }
}

private void StartRecording()
{
    if (recordingRoutine == 0)
    {
        UnityObjectUtil.StartDestroyQueue();
        recordingRoutine = Runnable.Run(RecordingHandler());
    }
}

private void StopRecording()
{
    if (recordingRoutine != 0)
    {
        Microphone.End(microphoneID);
        Runnable.Stop(recordingRoutine);
        recordingRoutine = 0;
    }
}

private void OnSpeechError(string error)
{
    Active = false;

    Log.Debug("ExampleStreaming.OnError()", "Error! {0}", error);
}

private IEnumerator RecordingHandler()
{
    recording = Microphone.Start(microphoneID, true, recordingBufferSize, recordingHZ);
    yield return null;      // let recordingRoutine get set

    if (recording == null)
    {
        StopRecording();
        yield break;
    }

    bool bFirstBlock = true;
    int midPoint = recording.samples / 2;
    float[] samples = null;

    while (recordingRoutine != 0 && recording != null)
    {
        int writePos = Microphone.GetPosition(microphoneID);
        if (writePos > recording.samples || !Microphone.IsRecording(microphoneID))
        {
            Debug.Log("Microphone disconnected.");
            StopRecording();
            yield break;
        }

        if ((bFirstBlock && writePos >= midPoint) || (!bFirstBlock && writePos < midPoint))
        {
            // front block is recorded, make a RecordClip and pass it onto our callback.
            samples = new float[midPoint];
            recording.GetData(samples, bFirstBlock ? 0 : midPoint);

            AudioData record = new AudioData();
            record.MaxLevel = Mathf.Max(Mathf.Abs(Mathf.Min(samples)), Mathf.Max(samples));
            record.Clip = AudioClip.Create("Recording", midPoint, recording.channels, recordingHZ, false);
            record.Clip.SetData(samples, 0);

            speechToText.OnListen(record);

            bFirstBlock = !bFirstBlock;
        }
        else
        {
            // calculate the number of samples remaining until we're ready for a block
            // of audio, and wait for that amount of time to be recorded.
            int remaining = bFirstBlock ? (midPoint - writePos) : (recording.samples - writePos);
            float timeRemaining = (float)remaining / (float) recordingHZ;

            yield return new WaitForSeconds(timeRemaining);
        }
    }

    yield break;
}

private void OnSpeechRecognize(SpeechRecognitionEvent result)
{
    if (result != null && result.results.Length > 0)
    {
        foreach (var res in result.results)
        {
            foreach (var alt in res.alternatives)
            {

                string text = string.Format("{0} ({1}, {2:0.00})\n", alt.transcript, res.final ? "Final" : "Interim", alt.confidence);
                // Log.Debug("ExampleStreaming.OnRecognize()", text);

                if (speechToTextField != null)
                {
                    speechToTextField.text = text;
                }

                if (res.final)
                {
                    if (characterState == SocialState.listening)
                    {
                        Debug.Log("WATSON | Speech to text recorded: \n" + alt.transcript);
                        StartCoroutine(Message(alt.transcript));
                    }
                }
                else
                {
                    if(characterState == SocialState.idle)
                    {
                        characterState = SocialState.listening;
                    }
                }
            }
        }
    }
}


// text to speech
private IEnumerator Synthesize(string text)
{
    Debug.Log("WATSON CALL | Synthesize input: \n" + text);

    textToSpeech.Voice = voiceType;
    bool doSynthesize = textToSpeech.ToSpeech(HandleSynthesizeCallback, OnFail, text, true);

    if(doSynthesize)
    {
        StartCoroutine(Analyze(text));
        characterState = SocialState.talking;
    }

    yield return null;
}

void HandleSynthesizeCallback(AudioClip clip, Dictionary<string, object> customData = null)
{
    if (Application.isPlaying && clip != null)
    {
        Invoke("ResumeIdle", clip.length);
        voiceSource.clip = clip;
        voiceSource.Play();
    }
}

void ResumeIdle()
{
    characterState = SocialState.idle;
}

// conversation
private IEnumerator Message (string text)
{
    Debug.Log("WATSON | Conversation input: \n" + text);

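    // attach the stored context (if any) so the dialog keeps its state;
    // extra keys added to conversationContext ride along as context variables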
    MessageRequest messageRequest = new MessageRequest()
    {
        input = new Dictionary<string, object>()
        {
            { "text", text }
        },
        context = conversationContext
    };
    bool doMessage = conversation.Message(HandleMessageCallback, OnFail, workspaceId, messageRequest);

    if(doMessage)
    {
        characterState = SocialState.thinking;

        if (conversationInputField != null)
        {
            conversationInputField.text = text;
        }
    }

    yield return null;
}

void HandleMessageCallback (object resp, Dictionary<string, object> customData)
{
    object _tempContext = null;
    (resp as Dictionary<string, object>).TryGetValue("context", out _tempContext);

    // store the returned context so it is sent back with the next Message() call
    if (_tempContext != null)
        conversationContext = _tempContext as Dictionary<string, object>;

    Dictionary<string, object> dict = Json.Deserialize(customData["json"].ToString()) as Dictionary<string, object>;
    //Debug.Log("Context data: " + customData["json"].ToString());
    // load output --> text;answer from Json node
    Dictionary<string, object> output = dict["output"] as Dictionary<string, object>;

    // read context variables; check ContainsKey first, since indexing a missing
    // key on Dictionary<string, object> throws KeyNotFoundException
    var context = dict["context"] as Dictionary<string, object>;
    if (context.ContainsKey("destination_city") && context["destination_city"] != null)
    {
        destinationCity = context["destination_city"].ToString();
        Debug.Log("Destination city: " + destinationCity);
        DestinationField.text = "Destination: " + destinationCity;
    }
    if (context.ContainsKey("departure_city") && context["departure_city"] != null)
    {
        departureCity = context["departure_city"].ToString();
        DepartureField.text = "Departure: " + departureCity;
    }
    if (context.ContainsKey("DateBegin") && context.ContainsKey("DateEnd"))
    {
        string dateBegin = context["DateBegin"].ToString();
        string dateEnd = context["DateEnd"].ToString();
        StayingPeriod.text = "Stay: " + dateBegin + " - " + dateEnd;
    }
    if (context.ContainsKey("PeriodNumber") && context.ContainsKey("PeriodDate") && context.ContainsKey("DateEnd"))
    {
        string periodNumber = context["PeriodNumber"].ToString();
        string periodDate = context["PeriodDate"].ToString();
        string dateEnd = context["DateEnd"].ToString();
        StayingPeriod.text = "Stay: " + periodNumber + " " + periodDate + " - " + dateEnd;
    }
    if (context.ContainsKey("time") && context["time"] != null)
    {
        string timeInfo = context["time"].ToString();
        DepartureTime.text = "Time: " + timeInfo;
    }

    List<object> text = output["text"] as List<object>;
    string answer = text[0].ToString(); //returns only the first response

    Debug.Log("WATSON | Conversation output: \n" + answer);

    if (conversationOutputField != null)
    {
        conversationOutputField.text = answer;
    }

    fsData fsdata = null;
    fsResult r = _serializer.TrySerialize(resp.GetType(), resp, out fsdata);
    if (!r.Succeeded)
    {
        throw new WatsonException(r.FormattedMessages);
    }

    //convert fsdata to MessageResponse
    MessageResponse messageResponse = new MessageResponse();
    object obj = messageResponse;
    r = _serializer.TryDeserialize(fsdata, obj.GetType(), ref obj);

    if (!r.Succeeded)
    {
        throw new WatsonException(r.FormattedMessages);
    }

    if (resp != null)
    {
        // recognize intents & entities (first candidate of each)
        if (messageResponse.intents.Length > 0)
        {
            string intent = messageResponse.intents[0].intent;
            Debug.Log("Intent: " + intent);
        }
        if (messageResponse.entities.Length > 0)
        {
            string entity = messageResponse.entities[0].entity;
            Debug.Log("Entity: " + entity);
            string literalEntity = messageResponse.entities[0].value;   // literal spoken entity
            Debug.Log("Entity Literal: " + literalEntity);
            if (entity == "city")
            {
                literalEntityCity = literalEntity;
            }
        }
    }

    StartCoroutine(Synthesize(answer));
}

// tone analyzer
private IEnumerator Analyze (string text)
{
    Debug.Log("WATSON | Tone analyze input: \n" + text);

    bool doAnalyze = toneAnalyzer.GetToneAnalyze(HandleAnalyzeCallback, OnFail, text);

    yield return null;
}

private void HandleAnalyzeCallback(ToneAnalyzerResponse resp, Dictionary<string, object> customData)
{
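    // walk the raw JSON payload: document_tone -> tone_categories -> tones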
    Dictionary<string, object> dict = Json.Deserialize(customData["json"].ToString()) as Dictionary<string, object>;

    Dictionary<string, object> document_tone = dict["document_tone"] as Dictionary<string, object>;

    List<object> tone_categories = document_tone["tone_categories"] as List<object>;

    string debugOutput = "";

    foreach (object tone in tone_categories)
    {
        Dictionary<string, object> category = (Dictionary<string, object>)tone;

        List<object> newTone = category["tones"] as List<object>;

        foreach (object insideTone in newTone)
        {
            Dictionary<string, object> tonedict = (Dictionary<string, object>)insideTone;

            float score = float.Parse(tonedict["score"].ToString());
            string id = tonedict["tone_id"].ToString();

            bool emotionAvailable = false;

            foreach(Emotion emotion in emotions)
            {
                if(emotion.emotionId == id)
                {
                    emotionAvailable = true;
                    emotion.power = score;
                    debugOutput += emotion.emotionId + " : " + emotion.power.ToString() + " - ";
                    break;
                }
            }

            if(!emotionAvailable)
            {
                Emotion newEmotion = new Emotion();
                newEmotion.emotionId = id;
                newEmotion.power = score;
                emotions.Add(newEmotion);
                debugOutput += newEmotion.emotionId + " : " + newEmotion.power.ToString() + " - ";
            }
        }
    }

    Debug.Log("WATSON | Tone analyze output: \n" + debugOutput);
}

private void OnFail(RESTConnector.Error error, Dictionary<string, object> customData)
{
    Log.Error("WatsonAgent.OnFail()", "Error received: {0}", error.ToString());
}
}
To get your own value into the context, grab the context dictionary and add a key/value pair to it:

var context = dict["context"] as Dictionary<string, object>;
context.Add("newItem", "Value here");
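Note that in the script above, what actually travels back to the service is conversationContext, which the Message() coroutine assigns to MessageRequest.context, so your additions need to end up in that dictionary. Below is a minimal sketch of the round trip, assuming the fields and the Message() coroutine of the WatsonAgent class above; the method name SendStringAsContext and the key myUnityValue are made up for illustration:

public void SendStringAsContext(string value)
{
    // on the very first turn no context has come back from Watson yet
    if (conversationContext == null)
        conversationContext = new Dictionary<string, object>();

    // "myUnityValue" is a hypothetical key; use whatever name your dialog
    // nodes reference in the workspace (e.g. $myUnityValue)
    conversationContext["myUnityValue"] = value;

    // Message() already sets MessageRequest.context = conversationContext,
    // so the value is delivered with the next utterance
    StartCoroutine(Message("hello"));
}

The dialog can then read the variable as $myUnityValue and use it in responses on later turns.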