Asynchronous 试图找出未来,异步,等待在颤振认知面部识别
在下面提到的代码中,我试图等待faceAPIOne()和faceAPITwo函数以及faceVerify()函数的返回值。目前,我在null异常上调用了方法,然后我打印了faceAPIOne的值。我做错了什么?我试图参考下面提到的链接 您可以使用下面提到的代码来重现问题Asynchronous 试图找出未来,异步,等待在颤振认知面部识别,asynchronous,flutter,dart,async-await,future,Asynchronous,Flutter,Dart,Async Await,Future,在下面提到的代码中,我试图等待faceAPIOne()和faceAPITwo函数以及faceVerify()函数的返回值。目前,我在null异常上调用了方法,然后我打印了faceAPIOne的值。我做错了什么?我试图参考下面提到的链接 您可以使用下面提到的代码来重现问题 FaceOneModel faceOneModel; var faceVerifyIdOne; List<dynamic> decodedFace1; Future<String> faceAPIOne(
FaceOneModel faceOneModel;
var faceVerifyIdOne;
List<dynamic> decodedFace1;
Future<String> faceAPIOne() async {
final bytes = image.readAsBytesSync();
var uri = Uri.parse(
"https://eastus.api.cognitive.microsoft.com/face/v1.0/detect?returnFaceId=true");
var request = http.Request("POST", uri)
..headers['Ocp-Apim-Subscription-Key'] =
"Your Key"
..headers['Content-Type'] = "application/octet-stream"
..bodyBytes = bytes;
var response = await request.send();
print(request);
print(response.statusCode);
response.stream.transform(utf8.decoder).listen((value) {
print(value);
decodedFace1 = json.decode(value);
print(decodedFace1);
faceOneModel = FaceOneModel.fromJson(decodedFace1[0]);
print("face one function "+faceOneModel.face1);
// setState(() {
// faceVerifyIdOne = faceOneModel.face1;
// });
});
return faceOneModel.face1;
}
var faceTwo;
var _result;
String ensembleUrl = 'my web url';
Future<Uint8List> _networkImageToByte() async {
final http.Response response = await http.post(
"My webservice Hyperlink",
headers: {
"Content-Type": "application/json",
},
body: jsonEncode(
{
"custId": phoneUserId,
},
),
);
if (response.statusCode == 200) {
var _jsonConvertor = json.decode(response.body);
var _jsonConvertorD = _jsonConvertor['d'];
print(_jsonConvertorD);
List<dynamic> list = json.decode(_jsonConvertorD);
NetworkModel networkModel = NetworkModel.fromJson(list[0]);
_result = networkModel.imageName;
}
Uint8List byteImage = await networkImageToByte(ensembleUrl + _result);
// setState(() {
// faceTwo = byteImage;
// });
return byteImage;
}
var faceVerifyIdTwo;
FaceTwoModel faceTwoModel;
List<dynamic> decodedFace2;
Future<String> faceAPITwo() async {
// faceTwo = await _networkImageToByte();
// final bytes = image.readAsBytesSync();
var uri = Uri.parse(
"https://eastus.api.cognitive.microsoft.com/face/v1.0/detect?returnFaceId=true");
var request = new http.Request("POST", uri)
..headers['Ocp-Apim-Subscription-Key'] =
"your Key"
..headers['Content-Type'] = "application/octet-stream"
..bodyBytes = await _networkImageToByte();
var response = await request.send();
print(request);
print(response.statusCode);
response.stream.transform(utf8.decoder).listen((value) {
print(value);
decodedFace2 = json.decode(value);
print(decodedFace2);
faceTwoModel = FaceTwoModel.fromJson(decodedFace2[0]);
print('face two function'+faceTwoModel.face2);
// setState(() {
// faceVerifyIdTwo = faceTwoModel.face2;
// });
});
return faceTwoModel.face2;
}
ResultModel resultModel;
var verifyFace;
var faceSingle;
var faceDouble;
Future<String> faceVerify() async {
// print('faceOne call back '+await faceAPIOne());
// print('facetow call back '+await faceAPITwo());
// var a = await faceAPIOne();
// var b = await faceAPITwo();
// print(a);
print('face one '+await faceAPIOne());
final response = await http.post(
"https://eastus.api.cognitive.microsoft.com/face/v1.0/verify",
headers: {
"Ocp-Apim-Subscription-Key": "Your Key",
"Content-Type": "application/json",
},
body: jsonEncode(
{"faceId1": 'abc', "faceId2": await faceAPITwo()},
),
);
print(response.body);
verifyFace = json.decode(response.body);
print('${verifyFace['isIdentical']}');
if (verifyFace['isIdentical'] == true) {
showDialog(
barrierDismissible: false,
context: context,
builder: (BuildContext context) {
return AlertDialog(
title: Text(
"Success",
style: TextStyle(color: Colors.green),
),
content: Text(
"Face Verification Success!!!",
style: TextStyle(),
),
actions: <Widget>[
FlatButton(
child: Text(
"OK",
style: TextStyle(),
),
onPressed: () {
Navigator.push(
context,
MaterialPageRoute(builder: (context) => IndexPage()),
);
// Navigator.pop(context);
},
)
],
);
},
);
} else if (verifyFace['isIdentical'] == false) {
showDialog(
barrierDismissible: false,
context: context,
builder: (BuildContext context) {
return AlertDialog(
title: Text(
"Alert",
style: TextStyle(color: Colors.red),
),
content: Text(
"Face Verification Failed!!!",
style: TextStyle(),
),
actions: <Widget>[
FlatButton(
child: Text(
"OK",
style: TextStyle(),
),
onPressed: () {
Navigator.push(
context,
MaterialPageRoute(builder: (context) => OtpPage()),
);
},
),
],
);
},
);
}
return 'success';
}
FaceOneModel FaceOneModel;
var faceVerifyIdOne;
表1;
Future faceAPIOne()异步{
final bytes=image.readAsBytesSync();
var uri=uri.parse(
"https://eastus.api.cognitive.microsoft.com/face/v1.0/detect?returnFaceId=true");
var request=http.request(“POST”,uri)
..headers['Ocp-Apim-Subscription-Key']=
“你的钥匙”
..headers['Content-Type']=“应用程序/八位字节流”
..bodyBytes=字节;
var response=wait request.send();
打印(请求);
打印(响应状态码);
response.stream.transform(utf8.decoder).listen((值){
印刷品(价值);
decodedFace1=json.decode(值);
打印(解码FACE1);
faceOneModel=faceOneModel.fromJson(decodedFace1[0]);
打印(“face one功能”+faceOneModel.face1);
//设置状态(){
//faceVerifyIdOne=faceOneModel.face1;
// });
});
返回faceOneModel.face1;
}
var faceTwo;
var_结果;
String ensembleUrl='我的web url';
Future _networkImageToByte()异步{
final http.Response Response=wait http.post(
“我的Web服务超链接”,
标题:{
“内容类型”:“应用程序/json”,
},
正文:JSONECODE(
{
“custId”:phoneUserId,
},
),
);
如果(response.statusCode==200){
var_jsonConvertor=json.decode(response.body);
var _jsonConvertorD=_jsonConvertorD['d'];
打印(_jsonConvertorD);
List=json.decode(_jsonConvertorD);
NetworkModel NetworkModel=NetworkModel.fromJson(列表[0]);
_结果=networkModel.imageName;
}
Uint8List byteImage=等待networkImageToByte(集成URL+_结果);
//设置状态(){
//faceTwo=byteImage;
// });
通过teimage返回;
}
var faceVerifyIdTwo;
FaceTwoModel FaceTwoModel;
表2;
Future faceAPITwo()异步{
//faceTwo=等待_networkImageToByte();
//final bytes=image.readAsBytesSync();
var uri=uri.parse(
"https://eastus.api.cognitive.microsoft.com/face/v1.0/detect?returnFaceId=true");
var request=newhttp.request(“POST”,uri)
..headers['Ocp-Apim-Subscription-Key']=
“你的钥匙”
..headers['Content-Type']=“应用程序/八位字节流”
…bodyBytes=等待_networkImageToByte();
var response=wait request.send();
打印(请求);
打印(响应状态码);
response.stream.transform(utf8.decoder).listen((值){
印刷品(价值);
decodedFace2=json.decode(值);
打印(解码FACE2);
faceTwoModel=faceTwoModel.fromJson(decodedFace2[0]);
打印('face two function'+FaceToModel.face2);
//设置状态(){
//faceVerifyIdTwo=FaceWoModel.face2;
// });
});
返回FaceToModel.face2;
}
结果模型结果模型;
var验证面;
var-faceSingle;
var faceDouble;
Future faceVerify()异步{
//打印('faceOne回调'+等待FaceApoine());
//打印('facetow回调'+等待faceAPITwo());
//var a=等待faceAPIOne();
//var b=等待faceAPITwo();
//印刷品(a);
打印('face one'+等待face Apoine());
最终响应=等待http.post(
"https://eastus.api.cognitive.microsoft.com/face/v1.0/verify",
标题:{
“Ocp Apim订阅密钥”:“您的密钥”,
“内容类型”:“应用程序/json”,
},
正文:JSONECODE(
{“faceId1”:“abc”,“faceId2”:等待faceAPITwo(),
),
);
打印(响应.正文);
verifyFace=json.decode(response.body);
打印(“${verifyFace['isIdentical']}”);
if(verifyFace['isIdentical']==true){
显示对话框(
禁止:错误,
上下文:上下文,
生成器:(BuildContext上下文){
返回警报对话框(
标题:正文(
“成功”,
样式:TextStyle(颜色:Colors.green),
),
内容:文本(
“人脸验证成功!!!”,
样式:TextStyle(),
),
行动:[
扁平按钮(
子:文本(
“好的”,
样式:TextStyle(),
),
已按下:(){
导航器。推(
上下文
MaterialPage路由(生成器:(上下文)=>IndexPage()),
);
//Navigator.pop(上下文);
},
)
],
);
},
);
}else if(verifyFace['isIdentical']==false){
显示对话框(
禁止:错误,
上下文:上下文,
生成器:(BuildContext上下文){
返回警报对话框(
标题:正文(
“警报”,
样式:TextStyle(颜色:Colors.red),
),
内容:文本(
“人脸验证失败!!!”,
样式:TextStyle(),
),
行动:[
扁平按钮(
子:文本(
“好的”,
样式:TextStyle(),
),
已按下:(){
导航器。推(
上下文
MaterialPage路由(生成器:(上下文)=>OtpPage()),
);
},
),
],
);
},
);
}
回归"成功",;
}
提前感谢您的帮助。
faceOneModel
不会在您到达faceOneAPI
的最后一行时填充,因为流没有机会处理响应
假设您使用的是包:http
,可以执行以下操作以避免使用流:
Future<String> faceAPIOne() async {
// ...
// Takes a StreamResponse and converts it to a Response
// which has the entire response body
final value = await http.Response.fromStream(response);
faceOneModel = json.decode(value.body);
// ...
return faceOneModel.face1;
}
Future faceAPIOne()异步{
// ...
//获取StreamResponse并将其转换为响应
//哪个没有