Warning: file_get_contents(/data/phpspider/zhask/data//catemap/1/amazon-web-services/12.json): failed to open stream: No such file or directory in /data/phpspider/zhask/libs/function.php on line 167

Warning: Invalid argument supplied for foreach() in /data/phpspider/zhask/libs/tag.function.php on line 1116

Notice: Undefined index: in /data/phpspider/zhask/libs/function.php on line 180

Warning: array_chunk() expects parameter 1 to be array, null given in /data/phpspider/zhask/libs/function.php on line 181
Amazon web services AWS Lambda达到内存限制_Amazon Web Services_Aws Lambda - Fatal编程技术网

Amazon web services AWS Lambda达到内存限制

Amazon web services AWS Lambda达到内存限制,amazon-web-services,aws-lambda,Amazon Web Services,Aws Lambda,我使用这个Lambda函数动态生成缩略图。但我得到了以下错误: REPORT RequestId: 9369f148-2a85-11e7-a571-5f1e1818669e Duration: 188.18 ms Billed Duration: 200 ms Memory Size: 1536 MB Max Memory Used: 1536 MB 而且 RequestId: 9369f148-2a85-11e7-a571-5f1e1818669e Process exited befor

我使用这个Lambda函数动态生成缩略图。但我得到了以下错误:

REPORT RequestId: 9369f148-2a85-11e7-a571-5f1e1818669e Duration: 188.18 ms Billed Duration: 200 ms Memory Size: 1536 MB Max Memory Used: 1536 MB 
而且

RequestId: 9369f148-2a85-11e7-a571-5f1e1818669e Process exited before completing request
所以我想我达到了最大内存限制。如果没有“uploadRecentImage()”函数,它就可以工作。但是如果我给imgVariants[]添加一个新的大小,我也会达到内存限制。 我认为函数处理imgVariants(每个循环)的方式会导致这种情况,但我不知道如何更好。 如果有任何帮助,我将不胜感激

以下是我的功能:

// dependencies
var async = require('async');
var AWS = require('aws-sdk');
var gm = require('gm').subClass({
  imageMagick: true
}); // use ImageMagick
var util = require('util');

// configuration as code - add, modify, remove array elements as desired
// Each entry produces one resized copy of the uploaded image. MAX_WIDTH is
// also used as the destination key prefix in uploadImage(), so it must be
// unique per variant. (The original listing labeled every variant
// "Large1"/"-l" — a copy-paste slip; labels are now distinct.)
var imgVariants = [
  {
    "SIZE": "Large",
    "POSTFIX": "-l",
    "MAX_WIDTH": 6000,
    "MAX_HEIGHT": 6000,
    "SIZING_QUALITY": 75,
    "INTERLACE": "Line"
  },
  {
    "SIZE": "Medium",
    "POSTFIX": "-m",
    "MAX_WIDTH": 1280,
    "MAX_HEIGHT": 1280,
    "SIZING_QUALITY": 75,
    "INTERLACE": "Line"
  },
  {
    "SIZE": "Small",
    "POSTFIX": "-s",
    "MAX_WIDTH": 500,
    "MAX_HEIGHT": 500,
    "SIZING_QUALITY": 75,
    "INTERLACE": "Line"
  },
  {
    "SIZE": "Thumb",
    "POSTFIX": "-t",
    "MAX_WIDTH": 100,
    "MAX_HEIGHT": 100,
    "SIZING_QUALITY": 75,
    "INTERLACE": "Line"
  }
];
// Appended to the source bucket name to form the destination bucket.
var DST_BUCKET_POSTFIX = "resized";



// get reference to S3 client
// NOTE(review): constructed once at module scope so warm Lambda invocations
// presumably reuse the client; region/credentials come from the runtime — confirm.
var s3 = new AWS.S3();

exports.handler = function (event, context) {
  // Read options from the event.
  console.log("Reading options from event:\n", util.inspect(event, {
    depth: 5
  }));
  var srcBucket = event.Records[0].s3.bucket.name;
  // Object key may have spaces or unicode non-ASCII characters.
  var srcKey = decodeURIComponent(event.Records[0].s3.object.key.replace(/\+/g, " "));
  // derive the file name and extension
  var srcFile = srcKey.match(/(.+)\.([^.]+)/);

  var srcName = srcFile[1];
  var scrExt = srcFile[2];
  // set the destination bucket
  var dstBucket = srcBucket + DST_BUCKET_POSTFIX;


  // make sure that source and destination are different buckets.
  if (srcBucket === dstBucket) {
    console.error("Destination bucket must be different from source bucket.");
    return;
  }

  if (!scrExt) {
    console.error('unable to derive file type extension from file key ' + srcKey);
    return;
  }

  if (scrExt != "jpg" && scrExt != "png") {
    console.log('skipping non-supported file type ' + srcKey + ' (must be jpg or png)');
    return;
  }

  function processImage(data, options, callback) {
    gm(data.Body).size(function (err, size) {

      var scalingFactor = Math.min(
        options.MAX_WIDTH / size.width,
        options.MAX_HEIGHT / size.height
      );
      var width = scalingFactor * size.width;
      var height = scalingFactor * size.height;

      this.resize(width, height)
        .quality(options.SIZING_QUALITY || 75)
        .interlace(options.INTERLACE || 'None')
        .toBuffer(scrExt, function (err, buffer) {
          if (err) {
            callback(err);

          } else {
            uploadImage(data.ContentType, buffer, options, callback);
            uploadRecentImage(data.ContentType, buffer, options, callback);
          }
        });
    });
  }

  function uploadImage(contentType, data, options, callback) {
    // Upload the transformed image to the destination S3 bucket.
    s3.putObject({
        Bucket: dstBucket,
        Key: options.MAX_WIDTH + '/' + srcName + '.' + scrExt,
        Body: data,
        ContentType: contentType
      },
      callback);
  }


  function uploadRecentImage(contentType, data, options, callback) {
    if(options.MAX_WIDTH == 500){
         s3.putObject({
            Bucket: dstBucket,
            Key: 'recent_optimized.' + scrExt,
            Body: data,
            ContentType: contentType
          },
          callback);
    }
    if(options.MAX_WIDTH == 100){
           s3.putObject({
            Bucket: dstBucket,
            Key: 'recent_thumb.' + scrExt,
            Body: data,
            ContentType: contentType
          },
          callback);
     }
  }


  // Download the image from S3 and process for each requested image variant.
  async.waterfall(
    [
      function download(next) {
          // Download the image from S3 into a buffer.
          s3.getObject({
              Bucket: srcBucket,
              Key: srcKey
            },
            next);
      },
      function processImages(data, next) {
          async.each(imgVariants, function (variant, next) {
            processImage(data, variant, next);
          }, next);

      }

    ],
    function (err) {
      if (err) {
        console.error(
          'Unable to resize ' + srcBucket + '/' + srcKey +
          ' and upload to ' + dstBucket +
          ' due to an error: ' + err
        );
      } else {
        console.log(
          'Successfully resized ' + srcBucket + '/' + srcKey +
          ' and uploaded to ' + dstBucket
        );
      }

      context.done();
    }
  );
};
  • 您可以限制并行processImages调用的数量:
  • 替换
    async.each(imgVariants,

    使用
    async.eachLimit(imgVariants,2,

    不并行处理两个以上的图像

  • 该脚本有一个bug:
  • 
    uploadImage(data.ContentType, buffer, options, callback);
    uploadRecentImage(data.ContentType, buffer, options, callback);
    
    这将调用
    回调
    两次,这是不允许的。只调用回调一次

  • 脚本有另一个错误:
    event.Records[0]
    它将只处理第一个图像。如果同时上载多个图像,将丢失一些图像

  • 循环时,您真的需要将已处理的图像保留在内存中吗?为什么不让源代码在循环中调用“lambda”函数,而不是在lambda中循环?如果您需要1.5GB来处理缩略图,则会出现很大的内存泄漏问题。不要尝试一次处理所有图像。除非必须,否则不要将任何内容保留在内存中。请清理在处理每个图像之后,不要使用全局结构,特别是如果您想并行处理图像。在全局结构中,清理是不可能的,您可以考虑使用“调整大小作为服务”。系统,如和。1。但所有imgVariants都会被处理?或者只处理前2个?如果我这样做,我没有内存错误,也没有调整大小的图像:-(