Python 从node.js调用Sagemaker Tensorflow Resnet50端点
问题：我想知道 node.js 中与下面这段 Python 代码等效的代码（标签：python、node.js、keras、amazon-sagemaker）：
# Load an image from disk, resize it to the 224x224 input size that
# ResNet50 expects (nearest-neighbour resampling), convert it to a
# numeric array, and apply Keras' ResNet50-specific preprocessing
# (channel reordering / normalization) to that array.
from keras.preprocessing import image
from PIL import Image
from keras.applications.resnet50 import preprocess_input
raw_img = image.load_img("some/path").resize((224, 224), Image.NEAREST)
img = preprocess_input(image.img_to_array(raw_img))
上下文
我将Keras的ResNet50模型上传到SageMaker端点。我可以使用下面的代码从Python调用它:
import json
import boto3
import numpy as np
import io
# Client for invoking deployed SageMaker inference endpoints.
client = boto3.client('runtime.sagemaker')
from keras.preprocessing import image
from PIL import Image
from keras.applications.resnet50 import preprocess_input
# Load + resize to ResNet50's 224x224 input, then apply the model's
# standard input preprocessing.
raw_img = image.load_img("some/path").resize((224, 224), Image.NEAREST)
img = preprocess_input(image.img_to_array(raw_img))
# Send the preprocessed image as a TensorFlow-Serving style JSON body:
# {"instances": [<3-d array>]} — one instance per batch element.
response = client.invoke_endpoint(
EndpointName="SAGEMAKER_ENDPOINT_NAME",
Body=json.dumps({ "instances": [ img.tolist() ] }),
ContentType="application/json"
)
现在我需要在node.js中执行同样的操作。我找到了如何使用aws sdk访问端点的方法:
import * as aws from 'aws-sdk';

// SageMaker runtime client for the region hosting the endpoint.
const sageMaker = new aws.SageMakerRuntime({
  region: 'ap-northeast-1'
});

// BUG FIX: the original fragment called `reject` with no enclosing
// Promise, so `reject` was an unresolved name. Wrapping the
// callback-style SDK call in `new Promise` puts `resolve`/`reject`
// in scope and yields an awaitable result.
const invocation = new Promise<aws.SageMakerRuntime.InvokeEndpointOutput>((resolve, reject) => {
  sageMaker.invokeEndpoint({
    EndpointName: endpointName,
    Body: input,
    ContentType: "application/json",
  }, (error, res) => {
    if (error) { return reject(error); }
    resolve(res); // YEAH
  });
});
但是我不知道如何生成输入
json,也就是说,相当于这个python片段:
# Reference Python pipeline to reproduce in node.js: load an image,
# resize to ResNet50's 224x224 input (nearest-neighbour), convert to a
# numeric array, and apply Keras' ResNet50 input preprocessing.
from keras.preprocessing import image
from PIL import Image
from keras.applications.resnet50 import preprocess_input
raw_img = image.load_img("some/path").resize((224, 224), Image.NEAREST)
img = preprocess_input(image.img_to_array(raw_img))
是否有任何库可以实现同样的效果,或者我需要重新发明轮子?答案是:我必须重新发明一半轮子。 以下是到达端点所需的工作(对于ResNet50网络):
- 将源图像解码为像素
- 转换为3个通道（即删除多余的通道，如 alpha——原文此处被截断，待确认）
- 调整大小为224x224
- 将像素规格化为[-1,1]
- 转换为三维阵列(SageMaker中的默认格式)
/**
 * Decodes the image file at `filePath` into raw pixel data.
 * Resolves with the image's width, height and channel count (taken
 * from the decoded ndarray's shape) plus the ndarray itself; rejects
 * if the underlying decoder reports an error.
 */
async function getImagePixels(filePath: string) {
  const decodePixels = require('get-pixels');
  return new Promise<IImagePixels>((resolve, reject) => {
    decodePixels(filePath, (err: Error | null, decoded: ndarray) => {
      if (err) {
        reject(err);
        return;
      }
      const [width, height, numberOfChannels] = decoded.shape;
      resolve({ width, height, numberOfChannels, pixels: decoded });
    });
  });
}
异步函数getImagePixels(文件路径:字符串){
const_getPixels=require('get-pixels');
返回新承诺((解决、拒绝)=>{
_getPixels(文件路径,(错误:error | null,像素:ndarray)=>{
if(error){返回拒绝(error);}
决心({
宽度:像素。形状[0],
高度:像素。形状[1],
NumberOfChannel:像素。形状[2],
像素:像素
})
});
})
}
imageToTensor（图像转张量）
import ndarray from 'ndarray';
/**
 * Copies the decoded image's pixel values into a rank-3 int32 tensor,
 * keeping only the first `o.numberOfChannels` channels of each pixel
 * (e.g. dropping alpha from an RGBA source).
 * Throws if any pixel value is non-numeric.
 */
function imageToTensor(img: IImagePixels, o: INormalizationOptions) {
  // Buffer sized for exactly o.numberOfChannels values per pixel; any
  // extra source channels are simply never copied.
  const rawTensorValues = new Int32Array(img.width * img.height * o.numberOfChannels);
  for (let row = 0; row < img.height; row++) {
    for (let col = 0; col < img.width; col++) {
      for (let channel = 0; channel < o.numberOfChannels; channel++) {
        const pixel = img.pixels.get(row, col, channel);
        if (!isNumeric(pixel)) { throw new Error(`Bad pixel: ${pixel}`) }
        // BUG FIX: the row/column stride must use o.numberOfChannels
        // (the stride of rawTensorValues), not img.numberOfChannels —
        // with a 4-channel source image the original computed offsets
        // past the end of the buffer and left gaps in it.
        const offset = row * img.width * o.numberOfChannels + col * o.numberOfChannels + channel;
        rawTensorValues[offset] = pixel;
      }
    }
  }
  // BUG FIX: create the tensor only AFTER the buffer is filled.
  // tf.tensor3d copies its input array, so the original (which built
  // the tensor first and filled the array afterwards) returned a
  // tensor of all zeros.
  // NOTE(review): the fill loop is row-major over (height, width), yet
  // the declared shape is [width, height, channels]; this only agrees
  // for square images like 224x224 — confirm for non-square inputs.
  return tf.tensor3d(rawTensorValues, [ img.width, img.height, o.numberOfChannels ], 'int32');
}
从“ndarray”导入ndarray;
功能图像传感器(img:IImagePixels,o:INormalizationOptions){
const rawTensorValues=新的INT32阵列(img.width*img.height*o.numberOfChannels);
常量rawTensor=tf.tensor3d(rawTensorValues,[img.width,img.height,o.numberOfChannels],'int32');
//这只使用第一个通道
for(设行=0;行
import ndarray from 'ndarray';
/**
 * Copies the decoded image's pixel values into a rank-3 int32 tensor,
 * keeping only the first `o.numberOfChannels` channels of each pixel
 * (e.g. dropping alpha from an RGBA source).
 * Throws if any pixel value is non-numeric.
 */
function imageToTensor(img: IImagePixels, o: INormalizationOptions) {
  // Buffer sized for exactly o.numberOfChannels values per pixel; any
  // extra source channels are simply never copied.
  const rawTensorValues = new Int32Array(img.width * img.height * o.numberOfChannels);
  for (let row = 0; row < img.height; row++) {
    for (let col = 0; col < img.width; col++) {
      for (let channel = 0; channel < o.numberOfChannels; channel++) {
        const pixel = img.pixels.get(row, col, channel);
        if (!isNumeric(pixel)) { throw new Error(`Bad pixel: ${pixel}`) }
        // BUG FIX: the row/column stride must use o.numberOfChannels
        // (the stride of rawTensorValues), not img.numberOfChannels —
        // with a 4-channel source image the original computed offsets
        // past the end of the buffer and left gaps in it.
        const offset = row * img.width * o.numberOfChannels + col * o.numberOfChannels + channel;
        rawTensorValues[offset] = pixel;
      }
    }
  }
  // BUG FIX: create the tensor only AFTER the buffer is filled.
  // tf.tensor3d copies its input array, so the original (which built
  // the tensor first and filled the array afterwards) returned a
  // tensor of all zeros.
  // NOTE(review): the fill loop is row-major over (height, width), yet
  // the declared shape is [width, height, channels]; this only agrees
  // for square images like 224x224 — confirm for non-square inputs.
  return tf.tensor3d(rawTensorValues, [ img.width, img.height, o.numberOfChannels ], 'int32');
}
/**
 * Converts a rank-3 tensor into a plain nested number[][][] array —
 * the shape that a TensorFlow-Serving style `{"instances": [...]}`
 * JSON payload expects.
 * Throws if any extracted value is non-numeric.
 */
async function tensor3dToArray3d(t: tf.Tensor3D) {
  const dataAs3dArray = new Array<number[][]>();
  const data = await t.data();
  const expectedSizeOfArray = t.shape[0] * t.shape[1] * t.shape[2];
  console.log(`Got vector of size ${t.shape[0]} ${t.shape[1]} ${t.shape[2]}, that is, ${expectedSizeOfArray} data`);
  console.log(`Actual size of data: ${data.length}`);
  for (let i = 0; i < t.shape[0]; i++) {
    dataAs3dArray[i] = new Array<number[]>();
    for (let j = 0; j < t.shape[1]; j++) {
      dataAs3dArray[i][j] = new Array<number>();
      for (let k = 0; k < t.shape[2]; k++) {
        // BUG FIX: the row-major flat offset is i*d1*d2 + j*d2 + k.
        // The original multiplied an extra dimension into both the i
        // and j terms and indexed far out of bounds.
        const ijk =
          i * t.shape[1] * t.shape[2]
          + j * t.shape[2]
          + k;
        // BUG FIX: the original left a `Math.random()` debug stub in
        // place of reading the tensor's data buffer.
        const datum = Number(data[ijk]);
        if (!isNumeric(datum)) { throw new Error(`Invalid datum at ${i},${j},${k}: ${datum}`) }
        dataAs3dArray[i][j][k] = datum;
      }
    }
  }
  return dataAs3dArray;
}
/**
 * Returns true when `n` parses to a finite number.
 * Uses the coercing global `isFinite` on purpose, so numeric strings
 * like "3.14" pass while NaN, Infinity and non-numeric text fail.
 */
function isNumeric(n: any) {
  const parsed = parseFloat(n);
  if (isNaN(parsed)) {
    return false;
  }
  return isFinite(n);
}