
How to send a CloudWatch alarm based on a CloudWatch Logs metric filter to SNS using Terraform

Tags: terraform, amazon-cloudwatch, amazon-cloudwatch-metrics

I am testing a CloudWatch alarm on a log filter (specifically, one that checks whether a Lambda has hit its maximum memory), which should be sent to SNS and then on to an SQS queue. However, I don't see the filter in the logs. The setup is:

cloudwatch (filtered alarm) -> SNS -> SQS -> splunk

What I have so far:

resource "aws_cloudwatch_metric_alarm" "general_lambda_error" {
    depends_on = [
      "aws_cloudwatch_log_metric_filter.max_memory_time_out",
    ]   
    alarm_name                = "general_lambda_error"
    comparison_operator       = "GreaterThanOrEqualToThreshold" 
    evaluation_periods        = "1" 
    metric_name               = "Errors" 
    namespace                 = "AWS/Lambda" 
    period                    = "60" 
    statistic                 = "SampleCount" 
    threshold                 = "2"
    alarm_description         = "This metric monitors Lambda Memory Max Usage and other Errors: threshold=2"
    alarm_actions             = [ "some-arn" ]
    dimensions {
      FunctionName = "lambda-test"
      Resource = "lambda-test"
    }
}
resource "aws_cloudwatch_log_metric_filter" "max_memory_time_out" {
    name                      = "max_memory_time_out"
    pattern                   = "[report_name=\"REPORT\", request_id_name=\"RequestId:\", request_id_value, duration_name=\"Duration:\", duration_value, duration_unit=\"ms\", billed_duration_name_1=\"Billed\", bill_duration_name_2=\"Duration:\", billed_duration_value, billed_duration_unit=\"ms\", memory_size_name_1=\"Memory\", memory_size_name_2=\"Size:\", memory_size_value, memory_size_unit=\"MB\", max_memory_used_name_1=\"Max\", max_memory_used_name_2=\"Memory\", max_memory_used_name_3=\"Used:\", max_memory_used_value, max_memory_used_unit=\"MB\"]"
    log_group_name            = "/aws/lambda/lambda-test"
    metric_transformation {
      name      = "SampleCount"
      namespace = "cloudwatch_filter"
      value     = "1"
    }
}

How do I get the filtered messages sent? I found a question describing a similar problem, but the solution there was to create a Lambda function. Can I do this without creating a Lambda function?
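For illustration, a minimal sketch of the SNS -> SQS leg without any Lambda; every resource name here (alarm_topic, alarm_queue) is a placeholder:

resource "aws_sns_topic" "alarm_topic" {
  name = "lambda-alarm-topic"
}

resource "aws_sqs_queue" "alarm_queue" {
  name = "lambda-alarm-queue"
}

# SNS cannot deliver into the queue until the queue policy allows it.
resource "aws_sqs_queue_policy" "alarm_queue" {
  queue_url = "${aws_sqs_queue.alarm_queue.id}"

  policy = <<JSON
{
  "Version": "2012-10-17",
  "Statement": [{
    "Effect": "Allow",
    "Principal": { "Service": "sns.amazonaws.com" },
    "Action": "sqs:SendMessage",
    "Resource": "${aws_sqs_queue.alarm_queue.arn}",
    "Condition": {
      "ArnEquals": { "aws:SourceArn": "${aws_sns_topic.alarm_topic.arn}" }
    }
  }]
}
JSON
}

resource "aws_sns_topic_subscription" "alarm_to_sqs" {
  topic_arn = "${aws_sns_topic.alarm_topic.arn}"
  protocol  = "sqs"
  endpoint  = "${aws_sqs_queue.alarm_queue.arn}"
}

With this in place, alarm_actions on the alarm would be ["${aws_sns_topic.alarm_topic.arn}"] instead of the "some-arn" placeholder.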

Here is the code that gets the CloudWatch logs to S3:

resource "aws_iam_role" "cloudwatchToFirehose" {
  name               = "${var.env}-${var.name}FirehoseCWL-Role"

  assume_role_policy = <<JSON
{
  "Version": "2012-10-17",
  "Statement": [{
      "Action": "sts:AssumeRole",
      "Principal": { "Service": "logs.${var.region}.amazonaws.com"},
      "Effect": "Allow"
  }]
}
JSON
}

resource "aws_iam_policy" "cloudwatchToFirehose" {
  name   = "${var.env}-${var.name}FirehoseCWL-Policy"

  policy = <<JSON
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect":"Allow",
      "Action": [
                "firehose:DeleteDeliveryStream",
                "firehose:PutRecord",
                "firehose:PutRecordBatch",
                "firehose:UpdateDestination"
      ],
      "Resource": ["${aws_kinesis_firehose_delivery_stream.firehoseToS3.arn}"]
    }
  ]
}
JSON
}

resource "aws_iam_role_policy_attachment" "cloudwatchToFirehose" {
  role       = "${aws_iam_role.cloudwatchToFirehose.name}"
  policy_arn = "${aws_iam_policy.cloudwatchToFirehose.arn}"
}

resource "aws_iam_role" "firehoseToS3" {
  name               = "${var.env}-${var.name}FirehoseS3-Role"
  assume_role_policy = <<JSON
{
  "Version": "2012-10-17",
  "Statement": [{
      "Action": "sts:AssumeRole",
      "Principal": { "Service": "firehose.amazonaws.com"},
      "Effect": "Allow"
  }]
}
JSON

  lifecycle {
    create_before_destroy = true
  }
}

resource "aws_iam_policy" "firehoseToS3" {
  name   = "${var.env}-${var.name}FirehoseS3-Policy"
  policy = <<JSON
{
  "Version": "2012-10-17",
  "Statement": [
        {
            "Effect": "Allow",
            "Action": [
                "s3:AbortMultipartUpload",
                "s3:GetBucketLocation",
                "s3:GetObject",
                "s3:ListBucket",
                "s3:ListBucketMultipartUploads",
                "s3:PutObject",
                "s3:PutObjectAcl"
            ],
            "Resource": [
                "arn:aws:s3:::${var.logs_bucket}",
                "arn:aws:s3:::${var.logs_bucket}/*"
            ]
        },
        {
            "Effect": "Allow",
            "Action": [
                "kinesis:DescribeStream",
                "kinesis:GetShardIterator",
                "kinesis:GetRecords"
            ],
            "Resource": "${aws_kinesis_firehose_delivery_stream.firehoseToS3.arn}"
        },
        {
           "Effect": "Allow",
           "Action": [
               "kms:Decrypt",
               "kms:GenerateDataKey"
           ],
           "Resource": [
               "${var.kms_general_key}"
           ]
        },
        {
           "Effect": "Allow",
           "Action": [
               "logs:PutLogEvents"
           ],
           "Resource": [
               "arn:aws:logs:*:*:log-group:${var.org}-${var.group}-${var.environment}/Firehose:*"
           ]
        },
        {
           "Effect": "Allow",
           "Action": [
               "lambda:InvokeFunction",
               "lambda:GetFunctionConfiguration"
           ],
           "Resource": [
               "arn:aws:lambda:*:*:function:*:*"
           ]
        }
  ]
}
JSON

  lifecycle {
    create_before_destroy = true
  }
}

resource "aws_iam_role_policy_attachment" "firehoseToS3" {
  role       = "${aws_iam_role.firehoseToS3.name}"
  policy_arn = "${aws_iam_policy.firehoseToS3.arn}"
  lifecycle {
    create_before_destroy = true
  }
}

resource "aws_kinesis_firehose_delivery_stream" "firehoseToS3" {
  name        = "${var.env}-${var.name}Firehose-Stream"
  destination = "s3"

  s3_configuration {
    role_arn        = "${aws_iam_role.firehoseToS3.arn}"
    bucket_arn      = "arn:aws:s3:::${var.logs_bucket}"
    buffer_interval = "300"
    buffer_size     = "10"
    prefix          = "${var.name}"
  }
}

resource "aws_cloudwatch_log_subscription_filter" "cloudwatchToFirehose" {
  count           = "1"
  name            = "${var.env}-${var.name}Filter-Subscription"
  role_arn        = "${aws_iam_role.cloudwatchToFirehose.arn}"
  log_group_name  = "${element(var.log_groups, count.index)}"
  filter_pattern  = ""
  destination_arn = "${aws_kinesis_firehose_delivery_stream.firehoseToS3.arn}"
}
You are mixing up log metric filters and log subscription filters. A log metric filter creates a metric, which can then be used to create an alarm. A log subscription filter ships the filtered log contents to a destination such as Kinesis. I did this by creating a Firehose that sends the data to S3 and then adding an S3 notification.

You could also use a subscription filter to send the logs to another Lambda function and have that Lambda write the log entries to SQS.

@victorm Can you provide a small example using Kinesis Firehose? And does this mean I can use the log metric filter to create the alarm?

See the Firehose example above.
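Roughly, the metric-filter-to-alarm wiring described above could look like the following sketch. The metric name MaxMemoryHits is illustrative, as is the reuse of the hypothetical alarm_topic from the earlier sketch; the key point is that the alarm references the filter's metric_transformation name and namespace instead of the built-in AWS/Lambda Errors metric:

resource "aws_cloudwatch_log_metric_filter" "max_memory" {
  name           = "max_memory"
  # Match every end-of-invocation REPORT line; the question's longer
  # space-delimited pattern extracts the individual memory fields.
  pattern        = "REPORT"
  log_group_name = "/aws/lambda/lambda-test"

  metric_transformation {
    name      = "MaxMemoryHits"
    namespace = "cloudwatch_filter"
    value     = "1"
  }
}

resource "aws_cloudwatch_metric_alarm" "max_memory" {
  alarm_name          = "max_memory"
  comparison_operator = "GreaterThanOrEqualToThreshold"
  evaluation_periods  = "1"
  # Same name/namespace the metric filter emits above.
  metric_name         = "MaxMemoryHits"
  namespace           = "cloudwatch_filter"
  period              = "60"
  statistic           = "Sum"
  threshold           = "1"
  alarm_actions       = ["${aws_sns_topic.alarm_topic.arn}"]
}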