diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
new file mode 100644
index 0000000..7acc080
--- /dev/null
+++ b/.github/CODEOWNERS
@@ -0,0 +1,4 @@
+# Learn about CODEOWNERS file format:
+# https://help.github.com/en/articles/about-code-owners
+
+* @SumoLogic/sumoappdev
diff --git a/README.md b/README.md
index ab1c7c5..ccc47e5 100644
--- a/README.md
+++ b/README.md
@@ -16,6 +16,8 @@ We put the Lambda functions to read from a particular AWS service (e.g CloudWatc
|[SumoLogic Lambda Function for AWS CloudWatch Logs With Dead Letter Queue Support](cloudwatchlogs-with-dlq)| This project comes with a CloudFormation template and two Lambda functions which send CloudWatch logs to a Sumo Logic HTTP source endpoint. The first function (invoked by CloudWatch) is configured with a DLQ and the second function (invoked periodically by CloudWatch Events) reads from the DLQ.| [AWS Lambda ULM App](https://help.sumologic.com/Send-Data/Applications-and-Other-Data-Sources/AWS_Lambda_ULM/Collect_Logs_and_Metrics_for_AWS_Lambda_ULM) | [Docs](https://help.sumologic.com/Send-Data/Collect-from-Other-Data-Sources/Amazon-CloudWatch-Logs)|
|[SumoLogic Function for AWS CloudWatch Events](cloudwatchevents) | This function is invoked by AWS CloudWatch Events in response to state changes in your AWS resources that match an event target definition. The event payload received is then forwarded to a Sumo Logic HTTP source endpoint. | [AWS GuardDuty App](https://help.sumologic.com/Send-Data/Applications-and-Other-Data-Sources/Amazon-GuardDuty/Collect-Amazon-GuardDuty-Log-Files) | [Docs](cloudwatchevents/README.md) |
|[SumoLogic Function for Amazon Inspector](inspector) | This function subscribes to an SNS topic where Amazon Inspector publishes its findings. It receives the message payload as an input parameter, transforms it and sends it to a Sumo Logic HTTP source endpoint.| [Amazon Inspector](https://help.sumologic.com/Send-Data/Applications-and-Other-Data-Sources/Amazon-Inspector-App/) | [Docs](https://help.sumologic.com/Send-Data/Applications-and-Other-Data-Sources/Amazon-Inspector-App/01-Collect-Data-for-Amazon-Inspector) |
+| [AWS Kinesis Firehose for Metrics](kinesis-firehose-cloudwatch-collection/metrics)| This project sets up AWS resources to send AWS CloudWatch metrics to Sumo Logic using Kinesis Firehose metric streams. | CloudWatch Metrics, AWS Observability | [Docs](https://help.sumologic.com/03Send-Data/Sources/02Sources-for-Hosted-Collectors/Amazon-Web-Services/AWS_Kinesis_Firehose_for_Metrics_Source)|
+| [AWS Kinesis Firehose for Logs](kinesis-firehose-cloudwatch-collection/logs)| This project sets up AWS resources to send AWS CloudWatch logs to Sumo Logic using Kinesis Firehose streams. | CloudWatch Logs, AWS Observability | [Docs](https://help.sumologic.com/03Send-Data/Sources/02Sources-for-Hosted-Collectors/Amazon-Web-Services/AWS_Kinesis_Firehose_for_Logs_Source)|
|[Kinesis to Sumo Logic](kinesis)| This function is invoked by AWS Lambda after it detects new records in the Kinesis stream. The received collection of events is decompressed, transformed and sent to a Sumo Logic HTTP source endpoint | | [Docs](kinesis/README.md#lambda-configuration) |
|[SumoLogic Lambda Function for AWS CloudWatch Logs](cloudwatchlogs)| This function subscribes to a CloudWatch Log Group and is invoked by AWS CloudWatch with log messages as payload. The records received are decompressed, transformed and forwarded to a Sumo Logic HTTP source endpoint in chunks. While this function is simpler than the DLQ-based solution above, it doesn't handle failures and retries properly and is therefore not recommended. | Not Recommended | [Docs](https://help.sumologic.com/Send-Data/Collect-from-Other-Data-Sources/Create-an-Amazon-Lambda-Function) |
| [S3](s3) AND [Cloudtrail S3 to Sumo Logic](cloudtrail_s3)| This function receives S3 notifications on new files uploaded to the source S3 bucket, then reads these files, unzips them, and breaks down the records before finally sending them to the HTTP hosted collector endpoint. | DEPRECATED | [Docs](s3/README.md#lambda-setup) [Docs](cloudtrail_s3#lambda-setup)|
diff --git a/awsautoenableS3Logging/packaged.yaml b/awsautoenableS3Logging/packaged.yaml
index 95d6fe1..b6be531 100644
--- a/awsautoenableS3Logging/packaged.yaml
+++ b/awsautoenableS3Logging/packaged.yaml
@@ -1,7 +1,7 @@
AWSTemplateFormatVersion: '2010-09-09'
Transform: AWS::Serverless-2016-10-31
Description: Lambda Function for auto enable s3 logs for S3 Buckets, VPCs, Subnets,
- Network Interfaces and Application load balancer.
+  Network Interfaces, Application Load Balancer and Classic Load Balancer
Globals:
Function:
Timeout: 300
@@ -24,27 +24,27 @@ Metadata:
- s3logging
- flowlogs
Name: sumologic-s3-logging-auto-enable
- SemanticVersion: 1.0.2
- SourceCodeUrl: https://github.com/SumoLogic/sumologic-aws-lambda/tree/master/awsautoenableS3Logging
- LicenseUrl: s3://appdevstore/AutoEnableS3Logs/v1.0.2/978602b5b9ec16f8bab0e38fd6b3998f
- ReadmeUrl: s3://appdevstore/AutoEnableS3Logs/v1.0.2/d05d411471e0bb4db3389f2523f515f0
+ SemanticVersion: 1.0.18
+ SourceCodeUrl: https://github.com/SumoLogic/sumologic-aws-lambda/tree/main/awsautoenableS3Logging
+ LicenseUrl: s3://appdevstore/AutoEnableS3Logs/v1.0.18/978602b5b9ec16f8bab0e38fd6b3998f
+ ReadmeUrl: s3://appdevstore/AutoEnableS3Logs/v1.0.18/d05d411471e0bb4db3389f2523f515f0
SpdxLicenseId: Apache-2.0
Mappings:
Region2ELBAccountId:
us-east-1:
AccountId: '127311923021'
us-east-2:
- AccountId: 033677994240
+ AccountId: '033677994240'
us-west-1:
- AccountId: 027434742980
+ AccountId: '027434742980'
us-west-2:
AccountId: '797873946194'
af-south-1:
- AccountId: 098369216593
+ AccountId: '098369216593'
ca-central-1:
AccountId: '985666609251'
eu-central-1:
- AccountId: 054676820928
+ AccountId: '054676820928'
eu-west-1:
AccountId: '156460612806'
eu-west-2:
@@ -52,7 +52,7 @@ Mappings:
eu-south-1:
AccountId: '635631232127'
eu-west-3:
- AccountId: 009996457667
+ AccountId: '009996457667'
eu-north-1:
AccountId: '897822967062'
ap-east-1:
@@ -74,7 +74,7 @@ Mappings:
sa-east-1:
AccountId: '507241528517'
us-gov-west-1:
- AccountId: 048591011584
+ AccountId: '048591011584'
us-gov-east-1:
AccountId: '190560391635'
cn-north-1:
@@ -86,12 +86,14 @@ Parameters:
Type: String
Description: S3 - To Enable S3 Audit Logging for new S3 buckets. VPC - To Enable
VPC flow logs for new VPC, Subnets and Network Interfaces. ALB - To Enable S3
- Logging for new Application Load Balancer.
+ Logging for new Application Load Balancer. ELB - To Enable S3 logging for new
+ Classic Load Balancer
AllowedPattern: .+
AllowedValues:
- S3
- VPC
- ALB
+ - ELB
AutoEnableResourceOptions:
Type: String
Description: New - Automatically enables S3 logging for newly created AWS resources
@@ -131,23 +133,21 @@ Parameters:
Type: String
Conditions:
enable_alb_logging:
- Fn::And:
- - Fn::Equals:
- - Ref: AutoEnableLogging
- - ALB
- - Condition: auto_enable_new
+ Fn::Equals:
+ - Ref: AutoEnableLogging
+ - ALB
+ enable_elb_logging:
+ Fn::Equals:
+ - Ref: AutoEnableLogging
+ - ELB
enable_s3_buckets_logging:
- Fn::And:
- - Fn::Equals:
- - Ref: AutoEnableLogging
- - S3
- - Condition: auto_enable_new
+ Fn::Equals:
+ - Ref: AutoEnableLogging
+ - S3
enable_vpc_flow_logs_logging:
- Fn::And:
- - Fn::Equals:
- - Ref: AutoEnableLogging
- - VPC
- - Condition: auto_enable_new
+ Fn::Equals:
+ - Ref: AutoEnableLogging
+ - VPC
auto_enable_existing:
Fn::Or:
- Fn::Equals:
@@ -164,6 +164,22 @@ Conditions:
- Fn::Equals:
- Ref: AutoEnableResourceOptions
- Both
+ enable_alb_log_events:
+ Fn::And:
+ - Condition: auto_enable_new
+ - Condition: enable_alb_logging
+ enable_elb_log_events:
+ Fn::And:
+ - Condition: auto_enable_new
+ - Condition: enable_elb_logging
+ enable_s3_log_events:
+ Fn::And:
+ - Condition: auto_enable_new
+ - Condition: enable_s3_buckets_logging
+ enable_vpc_log_events:
+ Fn::And:
+ - Condition: auto_enable_new
+ - Condition: enable_vpc_flow_logs_logging
Resources:
SumoLambdaRole:
Type: AWS::IAM::Role
@@ -185,8 +201,6 @@ Resources:
Action:
- elasticloadbalancing:DescribeLoadBalancerAttributes
- elasticloadbalancing:DescribeLoadBalancers
- - elasticloadbalancing:AddTags
- - elasticloadbalancing:RemoveTags
- elasticloadbalancing:ModifyLoadBalancerAttributes
- logs:CreateLogGroup
- logs:CreateLogStream
@@ -209,13 +223,15 @@ Resources:
- s3:GetBucketLogging
- s3:PutBucketLogging
Resource: '*'
+ Metadata:
+ SamResourceId: SumoLambdaRole
EnableNewAWSResourcesLambda:
Type: AWS::Serverless::Function
Condition: auto_enable_new
Properties:
- CodeUri: s3://appdevstore/sumo_app_utils/v2.0.2/sumo_app_utils.zip
+ CodeUri: s3://appdevstore/sumo_app_utils/v2.0.21/sumo_app_utils.zip
Handler: awsresource.enable_s3_logs
- Runtime: python3.7
+ Runtime: python3.13
Role:
Fn::GetAtt:
- SumoLambdaRole
@@ -238,9 +254,11 @@ Resources:
- Region2ELBAccountId
- Ref: AWS::Region
- AccountId
+ Metadata:
+ SamResourceId: EnableNewAWSResourcesLambda
AutoEnableS3LogEventsInvokePermission:
Type: AWS::Lambda::Permission
- Condition: enable_s3_buckets_logging
+ Condition: enable_s3_log_events
Properties:
Action: lambda:InvokeFunction
FunctionName:
@@ -250,9 +268,11 @@ Resources:
Fn::GetAtt:
- AutoEnableS3LogEventsRuleTrigger
- Arn
+ Metadata:
+ SamResourceId: AutoEnableS3LogEventsInvokePermission
AutoEnableS3LogEventsRuleTrigger:
Type: AWS::Events::Rule
- Condition: enable_s3_buckets_logging
+ Condition: enable_s3_log_events
Properties:
      Description: Auto-Enable S3 logging for S3 Buckets with Lambda from events
EventPattern:
@@ -285,9 +305,11 @@ Resources:
- EnableNewAWSResourcesLambda
- Arn
Id: Main
+ Metadata:
+ SamResourceId: AutoEnableS3LogEventsRuleTrigger
AutoEnableVPCEventsInvokePermission:
Type: AWS::Lambda::Permission
- Condition: enable_vpc_flow_logs_logging
+ Condition: enable_vpc_log_events
Properties:
Action: lambda:InvokeFunction
FunctionName:
@@ -297,9 +319,11 @@ Resources:
Fn::GetAtt:
- AutoEnableVPCEventsRuleTrigger
- Arn
+ Metadata:
+ SamResourceId: AutoEnableVPCEventsInvokePermission
AutoEnableVPCEventsRuleTrigger:
Type: AWS::Events::Rule
- Condition: enable_vpc_flow_logs_logging
+ Condition: enable_vpc_log_events
Properties:
Description: Auto-Enable VPC Flow logs for VPCs with Lambda from events
EventPattern:
@@ -332,9 +356,11 @@ Resources:
- EnableNewAWSResourcesLambda
- Arn
Id: Main
+ Metadata:
+ SamResourceId: AutoEnableVPCEventsRuleTrigger
AutoEnableAlbLogEventsInvokePermission:
Type: AWS::Lambda::Permission
- Condition: enable_alb_logging
+ Condition: enable_alb_log_events
Properties:
Action: lambda:InvokeFunction
FunctionName:
@@ -344,9 +370,11 @@ Resources:
Fn::GetAtt:
- AutoEnableAlbLogEventsRuleTrigger
- Arn
+ Metadata:
+ SamResourceId: AutoEnableAlbLogEventsInvokePermission
AutoEnableAlbLogEventsRuleTrigger:
Type: AWS::Events::Rule
- Condition: enable_alb_logging
+ Condition: enable_alb_log_events
Properties:
Description: Auto-Enable S3 logging for ALB resources with Lambda from events
EventPattern:
@@ -379,19 +407,75 @@ Resources:
- EnableNewAWSResourcesLambda
- Arn
Id: Main
+ Metadata:
+ SamResourceId: AutoEnableAlbLogEventsRuleTrigger
+ AutoEnableElbLogEventsInvokePermission:
+ Type: AWS::Lambda::Permission
+ Condition: enable_elb_log_events
+ Properties:
+ Action: lambda:InvokeFunction
+ FunctionName:
+ Ref: EnableNewAWSResourcesLambda
+ Principal: events.amazonaws.com
+ SourceArn:
+ Fn::GetAtt:
+ - AutoEnableElbLogEventsRuleTrigger
+ - Arn
+ Metadata:
+ SamResourceId: AutoEnableElbLogEventsInvokePermission
+ AutoEnableElbLogEventsRuleTrigger:
+ Type: AWS::Events::Rule
+ Condition: enable_elb_log_events
+ Properties:
+ Description: Auto-Enable S3 logging for ELB classic resources with Lambda from
+ events
+ EventPattern:
+ source:
+ - aws.elasticloadbalancing
+ detail-type:
+ - AWS API Call via CloudTrail
+ detail:
+ eventSource:
+ - elasticloadbalancing.amazonaws.com
+ eventName:
+ - CreateLoadBalancer
+ Name:
+ Fn::Join:
+ - ''
+ - - sumo-logic-elb-s3-
+ - Fn::Select:
+ - 0
+ - Fn::Split:
+ - '-'
+ - Fn::Select:
+ - 2
+ - Fn::Split:
+ - /
+ - Ref: AWS::StackId
+ State: ENABLED
+ Targets:
+ - Arn:
+ Fn::GetAtt:
+ - EnableNewAWSResourcesLambda
+ - Arn
+ Id: Main
+ Metadata:
+ SamResourceId: AutoEnableElbLogEventsRuleTrigger
EnableExisitngAWSResourcesLambda:
Type: AWS::Serverless::Function
Condition: auto_enable_existing
Properties:
Handler: main.handler
- Runtime: python3.7
- CodeUri: s3://appdevstore/sumo_app_utils/v2.0.2/sumo_app_utils.zip
+ Runtime: python3.13
+ CodeUri: s3://appdevstore/sumo_app_utils/v2.0.21/sumo_app_utils.zip
MemorySize: 128
Timeout: 900
Role:
Fn::GetAtt:
- SumoLambdaRole
- Arn
+ Metadata:
+ SamResourceId: EnableExisitngAWSResourcesLambda
ExistingAWSResources:
Type: Custom::EnableS3LogsResources
Condition: auto_enable_existing
@@ -407,7 +491,10 @@ Resources:
- Fn::If:
- enable_vpc_flow_logs_logging
- vpc
- - elbv2
+ - Fn::If:
+ - enable_alb_logging
+ - elbv2
+ - elb
BucketName:
Ref: BucketName
Filter:
@@ -423,6 +510,8 @@ Resources:
- AccountId
RemoveOnDeleteStack:
Ref: RemoveOnDeleteStack
+ Metadata:
+ SamResourceId: ExistingAWSResources
Outputs:
EnableNewAWSResourcesLambda:
Description: Lambda Function ARN for New AWS Resources
diff --git a/awsautoenableS3Logging/sumologic-s3-logging-auto-enable.yaml b/awsautoenableS3Logging/sumologic-s3-logging-auto-enable.yaml
index a1f7edc..9f972be 100755
--- a/awsautoenableS3Logging/sumologic-s3-logging-auto-enable.yaml
+++ b/awsautoenableS3Logging/sumologic-s3-logging-auto-enable.yaml
@@ -1,6 +1,6 @@
AWSTemplateFormatVersion: '2010-09-09'
Transform: AWS::Serverless-2016-10-31
-Description: "Lambda Function for auto enable s3 logs for S3 Buckets, VPCs, Subnets, Network Interfaces and Application load balancer."
+Description: "Lambda Function for auto enable s3 logs for S3 Buckets, VPCs, Subnets, Network Interfaces, Application load balancer and Classic load balancer"
Globals:
Function:
@@ -24,8 +24,8 @@ Metadata:
- s3logging
- flowlogs
Name: sumologic-s3-logging-auto-enable
- SemanticVersion: 1.0.2
- SourceCodeUrl: https://github.com/SumoLogic/sumologic-aws-lambda/tree/master/awsautoenableS3Logging
+ SemanticVersion: 1.0.18
+ SourceCodeUrl: https://github.com/SumoLogic/sumologic-aws-lambda/tree/main/awsautoenableS3Logging
LicenseUrl: ./LICENSE
ReadmeUrl: ./README.md
SpdxLicenseId: Apache-2.0
@@ -88,12 +88,14 @@ Parameters:
Type: String
Description: "S3 - To Enable S3 Audit Logging for new S3 buckets.
VPC - To Enable VPC flow logs for new VPC, Subnets and Network Interfaces.
- ALB - To Enable S3 Logging for new Application Load Balancer."
+ ALB - To Enable S3 Logging for new Application Load Balancer.
+ ELB - To Enable S3 logging for new Classic Load Balancer"
AllowedPattern: ".+"
AllowedValues:
- 'S3'
- 'VPC'
- 'ALB'
+ - 'ELB'
AutoEnableResourceOptions:
Type: String
@@ -126,22 +128,17 @@ Parameters:
RemoveOnDeleteStack:
AllowedValues:
- true
- - false
+ - false
Default: true
Description: "True - To remove S3 logging or Vpc flow logs.
False - To keep the S3 logging."
Type: String
Conditions:
- enable_alb_logging: !And
- - !Equals [!Ref AutoEnableLogging, 'ALB']
- - !Condition auto_enable_new
- enable_s3_buckets_logging: !And
- - !Equals [!Ref AutoEnableLogging, 'S3']
- - !Condition auto_enable_new
- enable_vpc_flow_logs_logging: !And
- - !Equals [!Ref AutoEnableLogging, 'VPC']
- - !Condition auto_enable_new
+ enable_alb_logging: !Equals [!Ref AutoEnableLogging, 'ALB']
+ enable_elb_logging: !Equals [!Ref AutoEnableLogging, 'ELB']
+ enable_s3_buckets_logging: !Equals [!Ref AutoEnableLogging, 'S3']
+ enable_vpc_flow_logs_logging: !Equals [!Ref AutoEnableLogging, 'VPC']
auto_enable_existing: !Or
- !Equals [ !Ref AutoEnableResourceOptions, 'Existing' ]
@@ -149,6 +146,18 @@ Conditions:
auto_enable_new: !Or
- !Equals [ !Ref AutoEnableResourceOptions, 'New' ]
- !Equals [ !Ref AutoEnableResourceOptions, 'Both' ]
+ enable_alb_log_events: !And
+ - !Condition auto_enable_new
+ - !Condition enable_alb_logging
+ enable_elb_log_events: !And
+ - !Condition auto_enable_new
+ - !Condition enable_elb_logging
+ enable_s3_log_events: !And
+ - !Condition auto_enable_new
+ - !Condition enable_s3_buckets_logging
+ enable_vpc_log_events: !And
+ - !Condition auto_enable_new
+ - !Condition enable_vpc_flow_logs_logging
Resources:
@@ -172,8 +181,6 @@ Resources:
Action:
- elasticloadbalancing:DescribeLoadBalancerAttributes
- elasticloadbalancing:DescribeLoadBalancers
- - elasticloadbalancing:AddTags
- - elasticloadbalancing:RemoveTags
- elasticloadbalancing:ModifyLoadBalancerAttributes
- logs:CreateLogGroup
- logs:CreateLogStream
@@ -201,9 +208,9 @@ Resources:
Type: 'AWS::Serverless::Function'
Condition: auto_enable_new
Properties:
- CodeUri: s3://appdevstore/sumo_app_utils/v2.0.2/sumo_app_utils.zip
+ CodeUri: s3://appdevstore/sumo_app_utils/v2.0.21/sumo_app_utils.zip
Handler: "awsresource.enable_s3_logs"
- Runtime: python3.7
+ Runtime: python3.13
Role: !GetAtt SumoLambdaRole.Arn
Description: "Lambda Function for auto enable s3 logs for AWS Resources."
MemorySize: 128
@@ -218,7 +225,7 @@ Resources:
AutoEnableS3LogEventsInvokePermission:
Type: AWS::Lambda::Permission
- Condition: enable_s3_buckets_logging
+ Condition: enable_s3_log_events
Properties:
Action: lambda:InvokeFunction
FunctionName: !Ref EnableNewAWSResourcesLambda
@@ -227,7 +234,7 @@ Resources:
AutoEnableS3LogEventsRuleTrigger:
Type: 'AWS::Events::Rule'
- Condition: enable_s3_buckets_logging
+ Condition: enable_s3_log_events
Properties:
      Description: Auto-Enable S3 logging for S3 Buckets with Lambda from events
EventPattern:
@@ -257,7 +264,7 @@ Resources:
AutoEnableVPCEventsInvokePermission:
Type: AWS::Lambda::Permission
- Condition: enable_vpc_flow_logs_logging
+ Condition: enable_vpc_log_events
Properties:
Action: lambda:InvokeFunction
FunctionName: !Ref EnableNewAWSResourcesLambda
@@ -266,7 +273,7 @@ Resources:
AutoEnableVPCEventsRuleTrigger:
Type: 'AWS::Events::Rule'
- Condition: enable_vpc_flow_logs_logging
+ Condition: enable_vpc_log_events
Properties:
Description: Auto-Enable VPC Flow logs for VPCs with Lambda from events
EventPattern:
@@ -296,7 +303,7 @@ Resources:
AutoEnableAlbLogEventsInvokePermission:
Type: AWS::Lambda::Permission
- Condition: enable_alb_logging
+ Condition: enable_alb_log_events
Properties:
Action: lambda:InvokeFunction
FunctionName: !Ref EnableNewAWSResourcesLambda
@@ -305,7 +312,7 @@ Resources:
AutoEnableAlbLogEventsRuleTrigger:
Type: 'AWS::Events::Rule'
- Condition: enable_alb_logging
+ Condition: enable_alb_log_events
Properties:
Description: Auto-Enable S3 logging for ALB resources with Lambda from events
EventPattern:
@@ -333,13 +340,52 @@ Resources:
- Arn: !GetAtt EnableNewAWSResourcesLambda.Arn
Id: Main
+ AutoEnableElbLogEventsInvokePermission:
+ Type: AWS::Lambda::Permission
+ Condition: enable_elb_log_events
+ Properties:
+ Action: lambda:InvokeFunction
+ FunctionName: !Ref EnableNewAWSResourcesLambda
+ Principal: "events.amazonaws.com"
+ SourceArn: !GetAtt AutoEnableElbLogEventsRuleTrigger.Arn
+
+ AutoEnableElbLogEventsRuleTrigger:
+ Type: 'AWS::Events::Rule'
+ Condition: enable_elb_log_events
+ Properties:
+ Description: Auto-Enable S3 logging for ELB classic resources with Lambda from events
+ EventPattern:
+ source:
+ - aws.elasticloadbalancing
+ detail-type:
+ - AWS API Call via CloudTrail
+ detail:
+ eventSource:
+ - elasticloadbalancing.amazonaws.com
+ eventName:
+ - CreateLoadBalancer
+ Name: !Join
+ - ""
+ - - "sumo-logic-elb-s3-"
+ - !Select
+ - 0
+ - !Split
+ - "-"
+ - !Select
+ - 2
+ - !Split ["/", !Ref "AWS::StackId"]
+ State: ENABLED
+ Targets:
+ - Arn: !GetAtt EnableNewAWSResourcesLambda.Arn
+ Id: Main
+
EnableExisitngAWSResourcesLambda:
Type: 'AWS::Serverless::Function'
Condition: auto_enable_existing
Properties:
Handler: main.handler
- Runtime: python3.7
- CodeUri: s3://appdevstore/sumo_app_utils/v2.0.2/sumo_app_utils.zip
+ Runtime: python3.13
+ CodeUri: s3://appdevstore/sumo_app_utils/v2.0.21/sumo_app_utils.zip
MemorySize: 128
Timeout: 900
Role:
@@ -352,7 +398,7 @@ Resources:
Condition: auto_enable_existing
Properties:
ServiceToken: !GetAtt EnableExisitngAWSResourcesLambda.Arn
- AWSResource: !If [enable_s3_buckets_logging, "s3", !If [enable_vpc_flow_logs_logging, "vpc", "elbv2"] ]
+ AWSResource: !If [enable_s3_buckets_logging, "s3", !If [enable_vpc_flow_logs_logging, "vpc", !If [enable_alb_logging, "elbv2", "elb"]] ]
BucketName: !Ref BucketName
Filter: !Ref FilterExpression
BucketPrefix: !Ref BucketPrefix
diff --git a/awsautoenableS3Logging/test/Publishsam.sh b/awsautoenableS3Logging/test/Publishsam.sh
index 51ca477..6cc1d80 100755
--- a/awsautoenableS3Logging/test/Publishsam.sh
+++ b/awsautoenableS3Logging/test/Publishsam.sh
@@ -5,7 +5,7 @@ export AWS_PROFILE="default"
if [[ "${AWS_PROFILE}" == "personal" ]]
then
- SAM_S3_BUCKET="sumologiclambdahelper-us-east-1"
+ SAM_S3_BUCKET=""
else
SAM_S3_BUCKET="appdevstore"
fi
@@ -29,10 +29,11 @@ do
export version=`grep AWS::ServerlessRepo::Application: ../${VALUE} -A 20 | grep SemanticVersion | cut -d ':' -f 2 | xargs`
echo "Package and publish the Template file ${VALUE} with version ${version}."
- echo `sam validate -t ../${VALUE}`
+    ## ignoring the lint command as its errors can't be fixed here
+ # echo `sam validate -t ../${VALUE} --lint`
sam package --profile ${AWS_PROFILE} --template-file ../${VALUE} --s3-bucket ${SAM_S3_BUCKET} --output-template-file ../packaged.yaml \
- --s3-prefix "${KEY}/v${version}"
+ --s3-prefix "${KEY}/v${version}" --region ${AWS_REGION}
sam publish --template ../packaged.yaml --region ${AWS_REGION} --semantic-version ${version}
echo "Publish done"
diff --git a/cloudformation-telemetry/install_python_dependencies.sh b/cloudformation-telemetry/install_python_dependencies.sh
new file mode 100755
index 0000000..7ce533a
--- /dev/null
+++ b/cloudformation-telemetry/install_python_dependencies.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+# Pull the Amazon Linux image from Docker Hub
+# aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws
+docker pull public.ecr.aws/lambda/python:3.13-x86_64
+
+# Run the Amazon Linux container in detached mode
+docker run -d --name telemetry public.ecr.aws/lambda/python:3.13-x86_64 lambda_function.lambda_handler
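+# Passing the handler as CMD keeps the Lambda base image's entrypoint running, so we can exec into the container below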
+
+# Install dependencies inside the container
+docker exec -it telemetry /bin/bash -c "dnf install -y zip"
+
+# Create a virtual environment and install dependencies
+docker exec -it telemetry /bin/bash -c "python3 -m venv temp-venv && source temp-venv/bin/activate && mkdir telemetry && cd telemetry && pip install crhelper sumologic-appclient-sdk future_fstrings setuptools -t ."
+
+# Copy python file from host to container
+docker cp ./lambda_function.py telemetry:/var/task/telemetry
+docker cp ./metadata.yaml telemetry:/var/task/telemetry
+
+# Zip the contents of the telemetry directory
+docker exec -it telemetry /bin/bash -c "cd telemetry && ls -l && zip -r ../telemetry.zip ."
+
+# Copy the telemetry.zip file from the container to the host
+docker cp telemetry:/var/task/telemetry.zip ./telemetry.zip
+
+# Stop and remove the container
+docker stop telemetry
+docker rm telemetry
\ No newline at end of file
diff --git a/cloudformation-telemetry/lambda_function.py b/cloudformation-telemetry/lambda_function.py
new file mode 100644
index 0000000..bd6edeb
--- /dev/null
+++ b/cloudformation-telemetry/lambda_function.py
@@ -0,0 +1,172 @@
+from collections import defaultdict
+import boto3
+import time
+from crhelper import CfnResource
+from sumoappclient.sumoclient.outputhandlers import HTTPHandler
+from sumoappclient.common.utils import read_yaml_file
+from abc import ABC, abstractmethod
+
+helper = CfnResource(json_logging=False, log_level='INFO', sleep_on_delete=30)
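+# crhelper routes CloudFormation custom-resource lifecycle events to the create/update/delete handlers below and sends the response back to the stack.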
+
+@helper.create
+def create(event, context):
+ try:
+ T = telemetryFactory(event, context)
+ T.fetch_and_send_telemetry()
+ except Exception as e:
+ print(e)
+ return "Telemetry failed to sent for Create Stack"
+ helper.Status = "SUCCESS"
+ return "Telemetry sent for Create Stack"
+
+
+@helper.update
+def update(event, context):
+ try:
+ T = telemetryFactory(event, context)
+ T.fetch_and_send_telemetry()
+ except Exception as e:
+ print(e)
+ return "Telemetry failed to sent for Update Stack"
+ helper.Status = "SUCCESS"
+ return "Telemetry sent for Update Stack"
+
+
+@helper.delete
+def delete(event, context):
+ lambda_client = boto3.client('lambda')
+ try:
+ T = telemetryFactory(event, context)
+ T.fetch_and_send_telemetry()
+ # Self Delete the Telemetry Lambda function
+ if event['RequestType']=='Delete':
+ response = lambda_client.delete_function(FunctionName=context.invoked_function_arn)
+ except Exception as e:
+ print(e)
+ helper.Status = "SUCCESS"
+
+
+def lambda_handler(event, context):
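+    # Entry point: crhelper dispatches the incoming CloudFormation event to the handlers registered above.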
+ helper(event, context)
+
+def telemetryFactory(event, context):
+    # Return the solution-specific telemetry handler; fall back to the generic parent-stack handler.
+ if event['ResourceProperties']['solutionName'] == 'AWSO':
+ return awsoTelemetry(event, context)
+ else:
+ return parentStackTelemetry(event, context)
+ # elif event['ResourceProperties']['solutionName'] == 'CIS':
+ # return cisTelemetry(event, context)
+
+# Interface
+class baseTelemetry(ABC):
+
+ def __init__(self, event,context,*args, **kwargs):
+ self.event=event
+ self.context=context
+ self.config = read_yaml_file("./metadata.yaml")
+ self.config['SumoLogic']['SUMO_ENDPOINT'] = self.event['ResourceProperties']['TelemetryEndpoint']
+ self.sumoHttpHandler = HTTPHandler(self.config)
+ self.log = self.sumoHttpHandler.log
+ self.log.debug("Telemetry enabled")
+
+ @abstractmethod
+ def fetch_and_send_telemetry(self):
+ raise NotImplementedError
+
+ def send_telemetry(self, data):
+ r = self.sumoHttpHandler.send(data)
+
+# class cisTelemetry(baseTelemetry): # parentStackSetTelemetry
+# def create_telemetry_data(self):
+# pass
+
+class parentStackTelemetry(baseTelemetry):
+ def __init__(self, event,context,*args, **kwargs):
+ super().__init__(event,context)
+ self.stackID = event['ResourceProperties']['stackID']
+ self.cfclient = boto3.client('cloudformation')
+ self.all_resource_statuses=defaultdict(list)
+
+ def enrich_telemetry_data(self, log_data_list):
+ return log_data_list
+
+    # Returns True if any of the child resources is in an *_IN_PROGRESS state.
+ def _has_any_child_resources_in_progress_state(self):
+ all_stacks = self.cfclient.describe_stack_resources(StackName=self.stackID)
+        # PrimeInvoke is only responsible for triggering this Lambda.
+        # Exclude it from the status check; otherwise we'd loop forever, because the overall stack can't complete until PrimeInvoke itself completes.
+ for stack_resource in filter(lambda x: x["LogicalResourceId"] != self.event['LogicalResourceId'] ,all_stacks["StackResources"]):
+ stackStatus = stack_resource["ResourceStatus"]
+ if stackStatus.endswith('_IN_PROGRESS'):
+ return True
+ return False # None of the child resources are in IN_PROGRESS state
+
+ def _create_telemetry_data(self):
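+        # Build one log record per new (resource, status) pair from the stack's event history, de-duplicating statuses per resource.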
+ log_data_list=[]
+ all_stacks_events = self.cfclient.describe_stack_events(StackName= self.stackID)
+ for stack_resource in all_stacks_events["StackEvents"]:
+ resourceID = stack_resource["PhysicalResourceId"]
+ status = stack_resource["ResourceStatus"]
+ resource_status_reason = stack_resource.get('ResourceStatusReason', '')
+ if status not in self.all_resource_statuses.get(resourceID, []):
+ self.all_resource_statuses[resourceID].append(status)
+ log_data = {
+ 'requestid': self.context.aws_request_id,
+ 'timestamp': stack_resource['Timestamp'].isoformat(timespec='milliseconds'),
+ 'data': {
+ 'stackId': self.event['StackId'],
+ 'resourceType': stack_resource["ResourceType"],
+ 'resourceName': stack_resource["LogicalResourceId"],
+ 'resourceID': stack_resource["PhysicalResourceId"],
+ 'status': stack_resource["ResourceStatus"],
+ 'details': resource_status_reason
+ }
+ }
+ log_data_list.append(log_data)
+ return log_data_list
+
+ def fetch_and_send_telemetry(self):
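+        # Poll until no child resource is *_IN_PROGRESS, sending incremental stack events to Sumo Logic every scanInterval seconds.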
+ resources_in_progress = True
+ while (resources_in_progress):
+ resources_in_progress = self._has_any_child_resources_in_progress_state()
+ log_data_list = self._create_telemetry_data()
+ log_data_list = self.enrich_telemetry_data(log_data_list)
+ self.send_telemetry(log_data_list)
+            # If all child resources have completed except PrimeInvoke, mark PrimeInvoke (this custom resource) as completed
+ if not resources_in_progress:
+ helper._cfn_response(self.event)
+ time.sleep(int(self.event['ResourceProperties']['scanInterval']))
+ # If all resources are completed, make final call to know Parent stack status
+ if not resources_in_progress :
+ log_data_list = self._create_telemetry_data()
+ log_data_list = self.enrich_telemetry_data(log_data_list)
+ self.send_telemetry(log_data_list)
+
+
+class awsoTelemetry(parentStackTelemetry):
+ def __init__(self,event,context):
+ super().__init__(event,context)
+
+ def enrich_telemetry_data(self, log_data_list):
+ static_data = {
+ 'profile': {
+ 'sumo': {
+ 'deployment': self.event['ResourceProperties']['sumoDeployment'],
+ 'orgid': self.event['ResourceProperties']['sumoOrgId'],
+ },
+ 'solution': {
+ 'name': self.event['ResourceProperties']['solutionName'],
+ 'version': self.event['ResourceProperties']['solutionVersion'],
+ 'deploymentSource': self.event['ResourceProperties']['deploymentSource']
+ },
+ }
+ }
+ for log_data in log_data_list:
+ log_data.update(static_data)
+ return log_data_list
+
+if __name__=="__main__":
+ event={}
+ context = {"aws_request_id":"5678-sxcvbnm-fghjk-123456789"}
+ create(event,context)
\ No newline at end of file
diff --git a/cloudformation-telemetry/metadata.yaml b/cloudformation-telemetry/metadata.yaml
new file mode 100644
index 0000000..f7a036f
--- /dev/null
+++ b/cloudformation-telemetry/metadata.yaml
@@ -0,0 +1,21 @@
+Logging:
+ LOG_FORMAT: "%(levelname)s | %(asctime)s | %(threadName)s | %(name)s | %(message)s"
+ ROTATION_TYPE: D
+ ROTATION_INTERVAL: 10
+ ENABLE_CONSOLE_LOG: true
+ ENABLE_LOGFILE: false
+ LOG_FILEPATH: /tmp/cloudformation-telemetry/telemetry.log
+ LOG_LEVEL: "DEBUG"
+
+Collection:
+ MAX_PAYLOAD_BYTESIZE: 500000
+ MAX_RETRY: 5
+ BACKOFF_FACTOR: 1
+ COMPRESSED: true
+ TIMEOUT: 60
+
+DeployMetaData:
+ PACKAGENAME: "cloudformation-telemetry"
+
+SumoLogic:
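+  # Filled in at runtime from the stack's TelemetryEndpoint resource property (see lambda_function.py)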
+ SUMO_ENDPOINT: null
\ No newline at end of file
diff --git a/cloudformation-telemetry/telemetry.zip b/cloudformation-telemetry/telemetry.zip
new file mode 100644
index 0000000..1f05eef
Binary files /dev/null and b/cloudformation-telemetry/telemetry.zip differ
diff --git a/cloudtrailbenchmark/packaged.yaml b/cloudtrailbenchmark/packaged.yaml
index 59c55d7..2b32b23 100644
--- a/cloudtrailbenchmark/packaged.yaml
+++ b/cloudtrailbenchmark/packaged.yaml
@@ -43,10 +43,10 @@ Metadata:
- benchmark
- cloudtrail
Name: sumologic-aws-cloudtrail-benchmark
- LicenseUrl: s3://appdevstore/cloudtrailbenchmark/v1.0.11/cac1a6df52c685e0f6ebe4ae72078c80
- ReadmeUrl: s3://appdevstore/cloudtrailbenchmark/v1.0.11/8a901bb4fbbe82f128fc502dd3077508
- SemanticVersion: 1.0.11
- SourceCodeUrl: https://github.com/SumoLogic/sumologic-aws-lambda/tree/master/cloudtrailbenchmark
+ LicenseUrl: s3://appdevstore/cloudtrailbenchmark/v1.0.21/cac1a6df52c685e0f6ebe4ae72078c80
+ ReadmeUrl: s3://appdevstore/cloudtrailbenchmark/v1.0.21/8a901bb4fbbe82f128fc502dd3077508
+ SemanticVersion: 1.0.21
+ SourceCodeUrl: https://github.com/SumoLogic/sumologic-aws-lambda/tree/main/cloudtrailbenchmark
Parameters:
CollectorName:
Type: String
@@ -61,6 +61,7 @@ Parameters:
Type: String
SumoAccessKey:
Type: String
+ NoEcho: true
SumoDeployment:
Type: String
AllowedValues:
@@ -68,12 +69,12 @@ Parameters:
- ca
- de
- eu
+ - fed
- jp
- - us2
+ - kr
- us1
- - in
- - fed
- Description: Enter au, ca, de, eu, jp, us2, in, fed or us1
+ - us2
+ Description: Enter au, ca, de, eu, fed, jp, kr, us1 or us2
RemoveSumoResourcesOnDeleteStack:
AllowedValues:
- true
@@ -127,7 +128,9 @@ Resources:
Properties:
Location:
ApplicationId: arn:aws:serverlessrepo:us-east-1:956882708938:applications/sumologic-app-utils
- SemanticVersion: 2.0.1
+ SemanticVersion: 2.0.21
+ Metadata:
+ SamResourceId: SumoAppUtils
SumoRole:
Condition: SetupSumoResources
Type: AWS::IAM::Role
@@ -164,6 +167,8 @@ Resources:
- arn:aws:s3:::${CloudTrailTargetS3BucketName}/*
- S3bucketName:
Ref: CloudTrailTargetS3BucketName
+ Metadata:
+ SamResourceId: SumoRole
SumoCloudTrailExportPolicy:
Condition: SetUpCloudTrail
Type: AWS::S3::BucketPolicy
@@ -198,6 +203,8 @@ Resources:
- Fn::Sub: arn:aws:s3:::${CloudTrailTargetS3BucketName}
DependsOn:
- TargetS3Bucket
+ Metadata:
+ SamResourceId: SumoCloudTrailExportPolicy
SumoCloudTrail:
Condition: SetUpCloudTrail
Type: Custom::AWSTrail
@@ -215,6 +222,8 @@ Resources:
Ref: CloudTrailTargetS3BucketName
TrailName:
Fn::Sub: SumoCloudTrail-${AWS::StackName}
+ Metadata:
+ SamResourceId: SumoCloudTrail
TargetS3Bucket:
Condition: SetUpCloudTrail
Type: AWS::S3::Bucket
@@ -228,12 +237,16 @@ Resources:
- Event: s3:ObjectCreated:Put
Topic:
Ref: SumoSNSTopic
+ Metadata:
+ SamResourceId: TargetS3Bucket
SumoSNSTopic:
Condition: SetupSumoResources
Type: AWS::SNS::Topic
Properties:
TopicName:
Fn::Sub: SumoSNSTopic-${AWS::StackName}
+ Metadata:
+ SamResourceId: SumoSNSTopic
SumoSNSSubscription:
Condition: SetupSumoResources
Type: AWS::SNS::Subscription
@@ -254,6 +267,8 @@ Resources:
numMaxDelayRetries: 5
numNoDelayRetries: 0
backoffFunction: exponential
+ Metadata:
+ SamResourceId: SumoSNSSubscription
SumoSNSpolicy:
Condition: SetupSumoResources
Type: AWS::SNS::TopicPolicy
@@ -277,6 +292,8 @@ Resources:
- Ref: SumoSNSTopic
Topics:
- Ref: SumoSNSTopic
+ Metadata:
+ SamResourceId: SumoSNSpolicy
SumoHostedCollector:
Condition: SetupSumoResources
Type: Custom::Collector
@@ -300,6 +317,8 @@ Resources:
Ref: SumoDeployment
DependsOn:
- SumoAppUtils
+ Metadata:
+ SamResourceId: SumoHostedCollector
SumoS3Source:
Condition: SetupSumoResources
Type: Custom::AWSSource
@@ -337,6 +356,8 @@ Resources:
- Arn
DependsOn:
- SumoAppUtils
+ Metadata:
+ SamResourceId: SumoS3Source
SumoCloudTrailBenchmarkApp:
Type: Custom::App
Properties:
@@ -363,6 +384,8 @@ Resources:
Ref: SumoDeployment
DependsOn:
- SumoAppUtils
+ Metadata:
+ SamResourceId: SumoCloudTrailBenchmarkApp
SumoGISCloudTrailDevOpsApp:
Type: Custom::App
Properties:
@@ -389,6 +412,8 @@ Resources:
Ref: SumoDeployment
DependsOn:
- SumoAppUtils
+ Metadata:
+ SamResourceId: SumoGISCloudTrailDevOpsApp
Outputs:
CloudTrailBenchmarkAppFolder:
Description: Folder Name
diff --git a/cloudtrailbenchmark/template.yaml b/cloudtrailbenchmark/template.yaml
index 06de302..cd25a0f 100644
--- a/cloudtrailbenchmark/template.yaml
+++ b/cloudtrailbenchmark/template.yaml
@@ -49,8 +49,8 @@ Metadata:
Name: sumologic-aws-cloudtrail-benchmark
LicenseUrl: ./LICENSE
ReadmeUrl: ./README.md
- SemanticVersion: 1.0.11
- SourceCodeUrl: https://github.com/SumoLogic/sumologic-aws-lambda/tree/master/cloudtrailbenchmark
+ SemanticVersion: 1.0.21
+ SourceCodeUrl: https://github.com/SumoLogic/sumologic-aws-lambda/tree/main/cloudtrailbenchmark
Parameters:
@@ -67,6 +67,7 @@ Parameters:
Type: String
SumoAccessKey:
Type: String
+ NoEcho: true
SumoDeployment:
Type: String
AllowedValues:
@@ -74,12 +75,12 @@ Parameters:
- ca
- de
- eu
+ - fed
- jp
- - us2
+ - kr
- us1
- - in
- - fed
- Description: "Enter au, ca, de, eu, jp, us2, in, fed or us1"
+ - us2
+ Description: "Enter au, ca, de, eu, fed, jp, kr, us1 or us2"
RemoveSumoResourcesOnDeleteStack:
AllowedValues:
- true
@@ -123,7 +124,8 @@ Resources:
Properties:
Location:
ApplicationId: arn:aws:serverlessrepo:us-east-1:956882708938:applications/sumologic-app-utils
- SemanticVersion: 2.0.1
+ SemanticVersion: 2.0.21
+
SumoRole:
Condition: SetupSumoResources
@@ -257,7 +259,7 @@ Resources:
- sns:Publish
Condition:
StringEquals:
- aws:SourceAccount: !Ref "AWS::AccountId"
+ aws:SourceAccount: !Ref AWS::AccountId
ArnLike:
aws:SourceArn: !Sub "arn:aws:s3:::${CloudTrailTargetS3BucketName}"
Effect: Allow
@@ -273,7 +275,7 @@ Resources:
Type: Custom::Collector
Properties:
ServiceToken: !GetAtt SumoAppUtils.Outputs.SumoAppUtilsFunction
- Region: !Ref "AWS::Region"
+ Region: !Ref AWS::Region
CollectorType: Hosted
RemoveOnDeleteStack: !Ref RemoveSumoResourcesOnDeleteStack
CollectorName: !Ref CollectorName
@@ -290,7 +292,7 @@ Resources:
Properties:
SourceType: AwsCloudTrailBucket
ServiceToken: !GetAtt SumoAppUtils.Outputs.SumoAppUtilsFunction
- Region: !Ref "AWS::Region"
+ Region: !Ref AWS::Region
SourceName: !Ref SourceName
TargetBucketName: !Ref CloudTrailTargetS3BucketName
RemoveOnDeleteStack: !Ref RemoveSumoResourcesOnDeleteStack
@@ -309,7 +311,7 @@ Resources:
Type: Custom::App
Properties:
ServiceToken: !GetAtt SumoAppUtils.Outputs.SumoAppUtilsFunction
- Region: !Ref "AWS::Region"
+ Region: !Ref AWS::Region
AppName: "Global Intelligence for AWS CloudTrail"
AppId: "570bdc0d-f824-4fcb-96b2-3230d4497180"
RemoveOnDeleteStack: !Ref RemoveSumoResourcesOnDeleteStack
@@ -328,7 +330,7 @@ Resources:
Type: Custom::App
Properties:
ServiceToken: !GetAtt SumoAppUtils.Outputs.SumoAppUtilsFunction
- Region: !Ref "AWS::Region"
+ Region: !Ref AWS::Region
AppName: "Global Intelligence for CloudTrail DevOps"
AppId: "c7e195de-f169-460a-8e8b-7bb23af0ee5e"
RemoveOnDeleteStack: !Ref RemoveSumoResourcesOnDeleteStack
diff --git a/cloudtrailbenchmark/test/custom/app.yaml b/cloudtrailbenchmark/test/custom/app.yaml
new file mode 100644
index 0000000..e273c30
--- /dev/null
+++ b/cloudtrailbenchmark/test/custom/app.yaml
@@ -0,0 +1,80 @@
+AWSTemplateFormatVersion: '2010-09-09'
+Transform: AWS::Serverless-2016-10-31
+Description: 'This solution is used to test Sumo CloudTrail BenchmarkApp.'
+Globals:
+ Function:
+ Timeout: 300
+Metadata:
+ AWS::CloudFormation::Interface:
+ ParameterGroups:
+ - Label:
+ default: Sumo Logic Deployment Configuration
+ Parameters:
+ - SumoDeployment
+ - SumoAccessID
+ - SumoAccessKey
+ - Label:
+ default: Collection Configuration
+ Parameters:
+ - SourceCategoryName
+ - RemoveSumoResourcesOnDeleteStack
+Parameters:
+ SourceCategoryName:
+ Type: String
+ Default: Labs/AWS/CloudTrail
+ SumoAccessID:
+ Type: String
+ SumoAccessKey:
+ Type: String
+ NoEcho: true
+ SumoDeployment:
+ Type: String
+ AllowedValues:
+ - au
+ - ca
+ - de
+ - eu
+ - fed
+ - jp
+ - kr
+ - us1
+ - us2
+ Description: "Enter au, ca, de, eu, fed, jp, kr, us1 or us2"
+ RemoveSumoResourcesOnDeleteStack:
+ AllowedValues:
+ - true
+ - false
+ Default: false
+ Description: To delete collector, sources and app when stack is deleted, set this
+ parameter to true. Default is false.
+ Type: String
+ SumoAppUtilsFunction:
+ Description: Arn of Lambda function
+ Type: String
+
+
+Resources:
+ SumoCloudTrailBenchmarkApp:
+ Type: Custom::App
+ Properties:
+ ServiceToken:
+ Ref: SumoAppUtilsFunction
+ Region:
+ Ref: AWS::Region
+ AppName: Global Intelligence for AWS CloudTrail
+ AppId: 570bdc0d-f824-4fcb-96b2-3230d4497180
+ RemoveOnDeleteStack:
+ Ref: RemoveSumoResourcesOnDeleteStack
+ AppSources:
+ cloudtraillogsource:
+ Fn::Sub: _sourceCategory=${SourceCategoryName}
+ indexname: '%rnd%'
+ incrementalindex: '%rnd%'
+ SumoAccessID:
+ Ref: SumoAccessID
+ SumoAccessKey:
+ Ref: SumoAccessKey
+ SumoDeployment:
+ Ref: SumoDeployment
+ Metadata:
+ SamResourceId: SumoCloudTrailBenchmarkApp
\ No newline at end of file
diff --git a/cloudtrailbenchmark/testdeploy.sh b/cloudtrailbenchmark/testdeploy.sh
old mode 100644
new mode 100755
index ec18cac..d2f10a5
--- a/cloudtrailbenchmark/testdeploy.sh
+++ b/cloudtrailbenchmark/testdeploy.sh
@@ -1,18 +1,20 @@
#!/bin/bash
+export AWS_PROFILE="prod"
+export AWS_REGION="us-east-1"
if [ "$AWS_PROFILE" == "prod" ]
then
SAM_S3_BUCKET="appdevstore"
AWS_REGION="us-east-1"
else
- SAM_S3_BUCKET="cf-templates-5d0x5unchag-us-east-2"
+ SAM_S3_BUCKET=""
AWS_REGION="us-east-2"
fi
uid=$(cat /dev/random | LC_CTYPE=C tr -dc "[:lower:]" | head -c 6)
-version="1.0.11"
+version="1.0.21"
-sam package --template-file template.yaml --s3-bucket $SAM_S3_BUCKET --output-template-file packaged.yaml --s3-prefix "cloudtrailbenchmark/v$version"
+sam package --template-file template.yaml --s3-bucket $SAM_S3_BUCKET --output-template-file packaged.yaml --s3-prefix "cloudtrailbenchmark/v$version" --region $AWS_REGION --profile $AWS_PROFILE
sam publish --template packaged.yaml --region $AWS_REGION --semantic-version $version
diff --git a/cloudwatchevents/README.md b/cloudwatchevents/README.md
index 904c4c2..84f797d 100644
--- a/cloudwatchevents/README.md
+++ b/cloudwatchevents/README.md
@@ -13,7 +13,7 @@ First create an [HTTP collector endpoint](http://help.sumologic.com/Send_Data/So
2. Select `Blank Function` on the select blueprint page
3. Leave triggers empty for now, click next
4. Configure Lambda
- * Select Node.js 14.x as runtime
+ * Select Node.js 22.x as runtime
* Copy code from cloudwatchevents.js into the Lambda function code.
* Add Environment variables (See below)
5. Scroll down to the `Lambda function handler and role` section and make sure you set the right values that match the function. For the role, you can just use the basic execution role. Click next.
@@ -25,7 +25,7 @@ First create an [HTTP collector endpoint](http://help.sumologic.com/Send_Data/So
The following AWS Lambda environment variables are supported
-* `SUMO_ENDPOINT` (REQUIRED) - SumoLogic HTTP Collector [endpoint URL](http://help.sumologic.com/Send_Data/Sources/02Sources_for_Hosted_Collectors/HTTP_Source).
+* `SUMO_ENDPOINT` (REQUIRED) - SumoLogic HTTP Collector [endpoint URL](https://help.sumologic.com/docs/send-data/hosted-collectors/http-source/logs-metrics).
* `SOURCE_CATEGORY_OVERRIDE` (OPTIONAL) - Override _sourceCategory metadata field within SumoLogic. If `none` will not be overridden
* `SOURCE_HOST_OVERRIDE` (OPTIONAL) - Override _sourceHost metadata field within SumoLogic. If `none` will not be overridden
* `SOURCE_NAME_OVERRIDE` (OPTIONAL) - Override _sourceName metadata field within SumoLogic. If `none` will not be overridden
@@ -64,7 +64,11 @@ Create a S3 bucket in AWS with following policy
"Service": "serverlessrepo.amazonaws.com"
},
"Action": "s3:GetObject",
- "Resource": "arn:aws:s3:::bucket_name/*"
+ "Resource": "arn:aws:s3:::bucket_name/*",
+ "Condition": {
+ "StringEquals": {
+ "aws:SourceAccount": ""
+ }
}
]
}
diff --git a/cloudwatchevents/guardduty/cloudwatchevents.json b/cloudwatchevents/guardduty/cloudwatchevents.json
index 2d04db7..1207e61 100644
--- a/cloudwatchevents/guardduty/cloudwatchevents.json
+++ b/cloudwatchevents/guardduty/cloudwatchevents.json
@@ -85,7 +85,7 @@
]
},
"Timeout": 300,
- "Runtime": "nodejs14.x"
+ "Runtime": "nodejs22.x"
}
},
"CloudWatchEventFunctionCloudWatchEventTriggerPermission": {
diff --git a/cloudwatchevents/guardduty/deploy.sh b/cloudwatchevents/guardduty/deploy.sh
new file mode 100755
index 0000000..be889db
--- /dev/null
+++ b/cloudwatchevents/guardduty/deploy.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+export AWS_PROFILE="prod"
+if [ "$AWS_PROFILE" == "prod" ]
+then
+ SAM_S3_BUCKET="appdevstore"
+ AWS_REGION="us-east-1"
+else
+ SAM_S3_BUCKET="cf-templates-5d0x5unchag-us-east-2"
+ AWS_REGION="us-east-2"
+fi
+
+version="1.0.6"
+
+echo "Creating package.yaml"
+sam package --template-file template.yaml --s3-bucket $SAM_S3_BUCKET --output-template-file packaged.yaml --s3-prefix "GuardDuty/v"$version --region $AWS_REGION --profile $AWS_PROFILE
+
+echo "Publishing sumologic-guardduty-events-processor "$version
+sam publish --template packaged.yaml --region $AWS_REGION --semantic-version $version
+
+echo "Published sumologic-guardduty-events-processor "$version
+# sam deploy --template-file packaged_sumo_app_utils.yaml --stack-name testingsumoapputils --capabilities CAPABILITY_IAM --region $AWS_REGION
\ No newline at end of file
diff --git a/cloudwatchevents/guardduty/packaged.yaml b/cloudwatchevents/guardduty/packaged.yaml
index 3ea9902..ce86ff8 100644
--- a/cloudwatchevents/guardduty/packaged.yaml
+++ b/cloudwatchevents/guardduty/packaged.yaml
@@ -23,10 +23,10 @@ Metadata:
- cloudwatchevents
- guardduty
Name: sumologic-guardduty-events-processor
- LicenseUrl: s3://appdevstore/GuardDuty/v1.0.3/6092dd6c323e33634657102f570628e0
- ReadmeUrl: s3://appdevstore/GuardDuty/v1.0.3/9d217c45b3ababadef584aee27d4d607
- SemanticVersion: 1.0.3
- SourceCodeUrl: https://github.com/SumoLogic/sumologic-aws-lambda/tree/master/cloudwatchevents/guardduty
+ LicenseUrl: s3://appdevstore/GuardDuty/v1.0.6/6092dd6c323e33634657102f570628e0
+ ReadmeUrl: s3://appdevstore/GuardDuty/v1.0.6/9d217c45b3ababadef584aee27d4d607
+ SemanticVersion: 1.0.6
+ SourceCodeUrl: https://github.com/SumoLogic/sumologic-aws-lambda/tree/main/cloudwatchevents/guardduty
SpdxLicenseId: Apache-2.0
Parameters:
SumoEndpointUrl:
@@ -35,9 +35,9 @@ Resources:
CloudWatchEventFunction:
Type: AWS::Serverless::Function
Properties:
- CodeUri: s3://appdevstore/GuardDuty/v1.0.3/58c7f17decc811a6e8904a19a9f844a1
+ CodeUri: s3://appdevstore/GuardDuty/v1.0.6/22b7b226ca9ec3b9dbb28b94c0e5f824
Handler: cloudwatchevents.handler
- Runtime: nodejs14.x
+ Runtime: nodejs22.x
Environment:
Variables:
SUMO_ENDPOINT:
@@ -49,6 +49,8 @@ Resources:
Pattern:
source:
- aws.guardduty
+ Metadata:
+ SamResourceId: CloudWatchEventFunction
Outputs:
CloudWatchEventFunction:
Description: CloudWatchEvent Processor Function ARN
diff --git a/cloudwatchevents/guardduty/template.yaml b/cloudwatchevents/guardduty/template.yaml
index 8024a39..984f60c 100644
--- a/cloudwatchevents/guardduty/template.yaml
+++ b/cloudwatchevents/guardduty/template.yaml
@@ -23,8 +23,8 @@ Metadata:
Name: sumologic-guardduty-events-processor
LicenseUrl: ../LICENSE
ReadmeUrl: ./README.md
- SemanticVersion: 1.0.3
- SourceCodeUrl: https://github.com/SumoLogic/sumologic-aws-lambda/tree/master/cloudwatchevents/guardduty
+ SemanticVersion: 1.0.6
+ SourceCodeUrl: https://github.com/SumoLogic/sumologic-aws-lambda/tree/main/cloudwatchevents/guardduty
SpdxLicenseId: Apache-2.0
Parameters:
@@ -38,7 +38,7 @@ Resources:
Properties:
CodeUri: ../src/
Handler: cloudwatchevents.handler
- Runtime: nodejs14.x
+ Runtime: nodejs22.x
Environment:
Variables:
SUMO_ENDPOINT: !Ref SumoEndpointUrl
diff --git a/cloudwatchevents/guarddutybenchmark/deploy.sh b/cloudwatchevents/guarddutybenchmark/deploy.sh
new file mode 100755
index 0000000..c3e9e4b
--- /dev/null
+++ b/cloudwatchevents/guarddutybenchmark/deploy.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+export AWS_PROFILE="prod"
+export AWS_REGION="us-east-1"
+if [ "$AWS_PROFILE" == "prod" ]
+then
+ SAM_S3_BUCKET="appdevstore"
+ AWS_REGION="us-east-1"
+else
+ SAM_S3_BUCKET="cf-templates-5d0x5unchag-us-east-2"
+ AWS_REGION="us-east-2"
+fi
+
+version="1.0.18"
+
+echo "Creating package.yaml"
+sam package --template-file template_v2.yaml --s3-bucket $SAM_S3_BUCKET --output-template-file packaged.yaml --s3-prefix "guarddutybenchmark/v"$version --region $AWS_REGION --profile $AWS_PROFILE
+
+echo "Publishing sumologic-guardduty-benchmark "$version
+sam publish --template packaged.yaml --region $AWS_REGION --semantic-version $version
+
+echo "Published sumologic-guardduty-benchmark "$version
+# sam deploy --template-file packaged_sumo_app_utils.yaml --stack-name testingsumoapputils --capabilities CAPABILITY_IAM --region $AWS_REGION
\ No newline at end of file
diff --git a/cloudwatchevents/guarddutybenchmark/packaged.yaml b/cloudwatchevents/guarddutybenchmark/packaged.yaml
index 8d40759..6b0d0cd 100644
--- a/cloudwatchevents/guarddutybenchmark/packaged.yaml
+++ b/cloudwatchevents/guarddutybenchmark/packaged.yaml
@@ -53,10 +53,10 @@ Metadata:
- benchmark
- guardduty
Name: sumologic-guardduty-benchmark
- LicenseUrl: s3://appdevstore/guarddutybenchmark/v1.0.11/6092dd6c323e33634657102f570628e0
- ReadmeUrl: s3://appdevstore/guarddutybenchmark/v1.0.11/cab012d7fb7887671b751e6f5c0d2062
- SemanticVersion: 1.0.11
- SourceCodeUrl: https://github.com/SumoLogic/sumologic-aws-lambda/tree/master/cloudwatchevents/guarddutybenchmark
+ LicenseUrl: s3://appdevstore/guarddutybenchmark/v1.0.18/6092dd6c323e33634657102f570628e0
+ ReadmeUrl: s3://appdevstore/guarddutybenchmark/v1.0.18/cab012d7fb7887671b751e6f5c0d2062
+ SemanticVersion: 1.0.18
+ SourceCodeUrl: https://github.com/SumoLogic/sumologic-aws-lambda/tree/main/cloudwatchevents/guarddutybenchmark
SpdxLicenseId: Apache-2.0
Parameters:
CollectorName:
@@ -72,6 +72,7 @@ Parameters:
Type: String
SumoAccessKey:
Type: String
+ NoEcho: true
SumoDeployment:
Type: String
AllowedValues:
@@ -79,12 +80,12 @@ Parameters:
- ca
- de
- eu
+ - fed
- jp
- - us2
+ - kr
- us1
- - in
- - fed
- Description: Enter in, fed, au, ca, de, eu, jp, us2, or us1
+ - us2
+ Description: Enter au, ca, de, eu, fed, jp, kr, us1 or us2
RemoveSumoResourcesOnDeleteStack:
AllowedValues:
- true
@@ -96,7 +97,7 @@ Parameters:
Resources:
CloudWatchEventFunction:
Properties:
- CodeUri: s3://appdevstore/guarddutybenchmark/v1.0.11/58c7f17decc811a6e8904a19a9f844a1
+ CodeUri: s3://appdevstore/guarddutybenchmark/v1.0.18/22b7b226ca9ec3b9dbb28b94c0e5f824
Environment:
Variables:
SUMO_ENDPOINT:
@@ -111,14 +112,18 @@ Resources:
- aws.guardduty
Type: CloudWatchEvent
Handler: cloudwatchevents.handler
- Runtime: nodejs14.x
+ Runtime: nodejs22.x
Type: AWS::Serverless::Function
+ Metadata:
+ SamResourceId: CloudWatchEventFunction
SumoAppUtils:
Type: AWS::Serverless::Application
Properties:
Location:
ApplicationId: arn:aws:serverlessrepo:us-east-1:956882708938:applications/sumologic-app-utils
- SemanticVersion: 2.0.6
+ SemanticVersion: 2.0.21
+ Metadata:
+ SamResourceId: SumoAppUtils
SumoHostedCollector:
Type: Custom::Collector
Properties:
@@ -139,6 +144,8 @@ Resources:
Ref: SumoAccessKey
SumoDeployment:
Ref: SumoDeployment
+ Metadata:
+ SamResourceId: SumoHostedCollector
SumoHTTPSource:
Type: Custom::HTTPSource
Properties:
@@ -166,6 +173,8 @@ Resources:
Ref: SumoDeployment
DateFormat: yyyy-MM-dd'T'HH:mm:ss.SSS'Z'
DateLocatorRegex: .*"updatedAt":"(.*)".*
+ Metadata:
+ SamResourceId: SumoHTTPSource
SumoGuardDutyBenchmarkApp:
Type: Custom::App
Properties:
@@ -188,6 +197,8 @@ Resources:
Ref: SumoAccessKey
SumoDeployment:
Ref: SumoDeployment
+ Metadata:
+ SamResourceId: SumoGuardDutyBenchmarkApp
Outputs:
CloudWatchEventFunction:
Description: CloudWatchEvent Processor Function ARN
diff --git a/cloudwatchevents/guarddutybenchmark/template_v2.yaml b/cloudwatchevents/guarddutybenchmark/template_v2.yaml
index 23aed10..a3bb22e 100644
--- a/cloudwatchevents/guarddutybenchmark/template_v2.yaml
+++ b/cloudwatchevents/guarddutybenchmark/template_v2.yaml
@@ -56,8 +56,8 @@ Metadata:
Name: sumologic-guardduty-benchmark
LicenseUrl: ../LICENSE
ReadmeUrl: ./README.md
- SemanticVersion: 1.0.11
- SourceCodeUrl: https://github.com/SumoLogic/sumologic-aws-lambda/tree/master/cloudwatchevents/guarddutybenchmark
+ SemanticVersion: 1.0.18
+ SourceCodeUrl: https://github.com/SumoLogic/sumologic-aws-lambda/tree/main/cloudwatchevents/guarddutybenchmark
SpdxLicenseId: Apache-2.0
Parameters:
@@ -74,6 +74,7 @@ Parameters:
Type: String
SumoAccessKey:
Type: String
+ NoEcho: true
SumoDeployment:
Type: String
AllowedValues:
@@ -81,12 +82,12 @@ Parameters:
- ca
- de
- eu
+ - fed
- jp
- - us2
+ - kr
- us1
- - in
- - fed
- Description: "Enter in, fed, au, ca, de, eu, jp, us2, or us1"
+ - us2
+ Description: "Enter au, ca, de, eu, fed, jp, kr, us1 or us2"
RemoveSumoResourcesOnDeleteStack:
AllowedValues:
- true
@@ -110,7 +111,7 @@ Resources:
- aws.guardduty
Type: CloudWatchEvent
Handler: cloudwatchevents.handler
- Runtime: nodejs14.x
+ Runtime: nodejs22.x
Type: AWS::Serverless::Function
SumoAppUtils:
@@ -118,13 +119,13 @@ Resources:
Properties:
Location:
ApplicationId: arn:aws:serverlessrepo:us-east-1:956882708938:applications/sumologic-app-utils
- SemanticVersion: 2.0.6
+ SemanticVersion: 2.0.21
SumoHostedCollector:
Type: Custom::Collector
Properties:
ServiceToken: !GetAtt SumoAppUtils.Outputs.SumoAppUtilsFunction
- Region: !Ref "AWS::Region"
+ Region: !Ref AWS::Region
CollectorType: Hosted
RemoveOnDeleteStack: !Ref RemoveSumoResourcesOnDeleteStack
CollectorName: !Ref CollectorName
@@ -136,7 +137,7 @@ Resources:
Type: Custom::HTTPSource
Properties:
ServiceToken: !GetAtt SumoAppUtils.Outputs.SumoAppUtilsFunction
- Region: !Ref "AWS::Region"
+ Region: !Ref AWS::Region
SourceName: !Ref SourceName
RemoveOnDeleteStack: !Ref RemoveSumoResourcesOnDeleteStack
SourceCategory: !Ref SourceCategoryName
@@ -151,7 +152,7 @@ Resources:
Type: Custom::App
Properties:
ServiceToken: !GetAtt SumoAppUtils.Outputs.SumoAppUtilsFunction
- Region: !Ref "AWS::Region"
+ Region: !Ref AWS::Region
AppName: "Global Intelligence for Amazon GuardDuty"
AppId: "8e7efcb3-040a-4a92-9f8d-922fafb24afb"
RemoveOnDeleteStack: !Ref RemoveSumoResourcesOnDeleteStack
diff --git a/cloudwatchevents/guarddutyeventprocessor.yaml b/cloudwatchevents/guarddutyeventprocessor.yaml
new file mode 100644
index 0000000..2b7c62a
--- /dev/null
+++ b/cloudwatchevents/guarddutyeventprocessor.yaml
@@ -0,0 +1,204 @@
+AWSTemplateFormatVersion: 2010-09-09
+Description: >
+  This function is invoked by AWS CloudWatch Events in response to state changes
+  in your AWS resources that match an event target definition. The event
+ payload received is then forwarded to Sumo Logic HTTP source endpoint.
+Parameters:
+ SumoEndpointUrl:
+ Type: String
+Outputs:
+ CloudWatchEventFunction:
+ Description: CloudWatchEvent Processor Function ARN
+ Value: !GetAtt
+ - CloudWatchEventFunction
+ - Arn
+Resources:
+ CloudWatchEventFunction:
+ Type: 'AWS::Lambda::Function'
+ Metadata:
+ SamResourceId: CloudWatchEventFunction
+ Properties:
+ Code:
+ ZipFile: |
+ // SumoLogic Endpoint to post logs
+ var SumoURL = process.env.SUMO_ENDPOINT;
+
+ // For some beta AWS services, the default is to remove the outer fields of the received object since they are not useful.
+ // change this if necessary.
+ var removeOuterFields = false;
+
+ // The following parameters override the sourceCategoryOverride, sourceHostOverride and sourceNameOverride metadata fields within SumoLogic.
+      // Note these can also be overridden via JSON within the message payload. See the README for more information.
+ var sourceCategoryOverride = process.env.SOURCE_CATEGORY_OVERRIDE || ''; // If empty sourceCategoryOverride will not be overridden
+ var sourceHostOverride = process.env.SOURCE_HOST_OVERRIDE || ''; // If empty sourceHostOverride will not be set to the name of the logGroup
+ var sourceNameOverride = process.env.SOURCE_NAME_OVERRIDE || ''; // If empty sourceNameOverride will not be set to the name of the logStream
+
+ var retryInterval = process.env.RETRY_INTERVAL || 5000; // the interval in millisecs between retries
+ var numOfRetries = process.env.NUMBER_OF_RETRIES || 3; // the number of retries
+
+ var https = require('https');
+ var zlib = require('zlib');
+ var url = require('url');
+
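+      // Retry fn up to `retry` times; `interval` is either a fixed delay in ms or a function returning the next delay (see exponentialBackoff below).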
+ Promise.retryMax = function(fn,retry,interval,fnParams) {
+ return fn.apply(this,fnParams).catch( err => {
+ var waitTime = typeof interval === 'function' ? interval() : interval;
+ console.log("Retries left " + (retry-1) + " delay(in ms) " + waitTime);
+ return (retry>1? Promise.wait(waitTime).then(()=> Promise.retryMax(fn,retry-1,interval, fnParams)):Promise.reject(err));
+ });
+ }
+
+ Promise.wait = function(delay) {
+ return new Promise((fulfill,reject)=> {
+ //console.log(Date.now());
+ setTimeout(fulfill,delay||0);
+ });
+ };
+
+ function exponentialBackoff(seed) {
+ var count = 0;
+ return function() {
+ count++;
+ return count*seed;
+ }
+ }
+
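+      // messages maps 'name:category:host' keys to arrays of events; each key is posted as one batch with the matching X-Sumo-* headers.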
+ function postToSumo(callback, messages) {
+ var messagesTotal = Object.keys(messages).length;
+ var messagesSent = 0;
+ var messageErrors = [];
+
+ var urlObject = url.parse(SumoURL);
+ var options = {
+ 'hostname': urlObject.hostname,
+ 'path': urlObject.pathname,
+ 'method': 'POST'
+ };
+
+ var finalizeContext = function () {
+ var total = messagesSent + messageErrors.length;
+ if (total == messagesTotal) {
+ console.log('messagesSent: ' + messagesSent + ' messagesErrors: ' + messageErrors.length);
+ if (messageErrors.length > 0) {
+ callback('errors: ' + messageErrors);
+ } else {
+ callback(null, "Success");
+ }
+ }
+ };
+
+ function httpSend(options, headers, data) {
+ return new Promise( (resolve,reject) => {
+ var curOptions = options;
+ curOptions.headers = headers;
+ var req = https.request(curOptions, function (res) {
+ var body = '';
+ res.setEncoding('utf8');
+ res.on('data', function (chunk) {
+ body += chunk; // don't really do anything with body
+ });
+ res.on('end', function () {
+ if (res.statusCode == 200) {
+ resolve(body);
+ } else {
+ reject({'error':'HTTP Return code ' + res.statusCode,'res':res});
+ }
+ });
+ });
+ req.on('error', function (e) {
+ reject({'error':e,'res':null});
+ });
+ for (var i = 0; i < data.length; i++) {
+ req.write(JSON.stringify(data[i]) + '\n');
+ }
+ console.log("sending to Sumo...")
+ req.end();
+ });
+ }
+ Object.keys(messages).forEach(function (key, index) {
+ var headerArray = key.split(':');
+ var headers = {
+ 'X-Sumo-Name': headerArray[0],
+ 'X-Sumo-Category': headerArray[1],
+ 'X-Sumo-Host': headerArray[2],
+ 'X-Sumo-Client': 'cloudwatchevents-aws-lambda'
+ };
+ Promise.retryMax(httpSend, numOfRetries, retryInterval, [options, headers, messages[key]]).then((body)=> {
+ messagesSent++;
+ finalizeContext()
+ }).catch((e) => {
+ messageErrors.push(e.error);
+ finalizeContext();
+ });
+ });
+ }
+
+ exports.handler = function (event, context, callback) {
+
+ // Used to hold chunks of messages to post to SumoLogic
+ var messageList = {};
+ var final_event;
+ // Validate URL has been set
+ var urlObject = url.parse(SumoURL);
+ if (urlObject.protocol != 'https:' || urlObject.host === null || urlObject.path === null) {
+ callback('Invalid SUMO_ENDPOINT environment variable: ' + SumoURL);
+ }
+
+ //console.log(event);
+ if ((event.source === "aws.guardduty") || (removeOuterFields)) {
+ final_event = event.detail;
+ } else {
+ final_event = event;
+ }
+ messageList[sourceNameOverride+':'+sourceCategoryOverride+':'+sourceHostOverride]=[final_event];
+ postToSumo(callback, messageList);
+ };
+ Handler: index.handler
+ Role: !GetAtt
+ - CloudWatchEventFunctionRole
+ - Arn
+ Runtime: nodejs22.x
+ Timeout: 300
+ Environment:
+ Variables:
+ SUMO_ENDPOINT: !Ref SumoEndpointUrl
+ Tags:
+ - Key: 'lambda:createdBy'
+ Value: SAM
+ CloudWatchEventFunctionRole:
+ Type: 'AWS::IAM::Role'
+ Properties:
+ AssumeRolePolicyDocument:
+ Version: 2012-10-17
+ Statement:
+ - Action:
+ - 'sts:AssumeRole'
+ Effect: Allow
+ Principal:
+ Service:
+ - lambda.amazonaws.com
+ ManagedPolicyArns:
+ - 'arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole'
+ Tags:
+ - Key: 'lambda:createdBy'
+ Value: SAM
+ CloudWatchEventFunctionCloudWatchEventTrigger:
+ Type: 'AWS::Events::Rule'
+ Properties:
+ EventPattern:
+ source:
+ - aws.guardduty
+ Targets:
+ - Arn: !GetAtt
+ - CloudWatchEventFunction
+ - Arn
+ Id: CloudWatchEventFunctionCloudWatchEventTriggerLambdaTarget
+ CloudWatchEventFunctionCloudWatchEventTriggerPermission:
+ Type: 'AWS::Lambda::Permission'
+ Properties:
+ Action: 'lambda:InvokeFunction'
+ FunctionName: !Ref CloudWatchEventFunction
+ Principal: events.amazonaws.com
+ SourceArn: !GetAtt
+ - CloudWatchEventFunctionCloudWatchEventTrigger
+ - Arn
diff --git a/cloudwatchevents/package.json b/cloudwatchevents/package.json
index 28ace17..d66bbba 100644
--- a/cloudwatchevents/package.json
+++ b/cloudwatchevents/package.json
@@ -3,7 +3,7 @@
"version": "1.0.0",
"description": "AWS Lambda function to collect CloudWatch events and post them to SumoLogic.",
"main": "src/cloudwatchevents.js",
- "repository": "https://github.com/SumoLogic/sumologic-aws-lambda/tree/master/cloudwatchevents",
+ "repository": "https://github.com/SumoLogic/sumologic-aws-lambda/tree/main/cloudwatchevents",
"author": "Himanshu Pal",
"license": "Apache-2.0",
"dependencies": {},
diff --git a/cloudwatchevents/test/requirements.txt b/cloudwatchevents/test/requirements.txt
index 390a345..749893e 100644
--- a/cloudwatchevents/test/requirements.txt
+++ b/cloudwatchevents/test/requirements.txt
@@ -1,3 +1,3 @@
-requests==2.20.0
-boto3==1.5.1
-sumologic-sdk==0.1.11
\ No newline at end of file
+requests==2.32.5
+boto3==1.36.11
+sumologic-sdk==0.1.17
\ No newline at end of file
diff --git a/cloudwatchevents/test/test-guardduty-benchmark.py b/cloudwatchevents/test/test-guardduty-benchmark.py
index 7d9cbb5..85050df 100644
--- a/cloudwatchevents/test/test-guardduty-benchmark.py
+++ b/cloudwatchevents/test/test-guardduty-benchmark.py
@@ -10,6 +10,8 @@
import boto3
from sumologic import SumoLogic
+TIMEFORMAT = '%Y-%m-%dT%H:%M:%S'
+
# Update the below values in case the template locations are changed.
GUARD_DUTY_BENCHMARK_TEMPLATE = "guarddutybenchmark/template_v2.yaml"
@@ -95,7 +97,7 @@ def __init__(self, source_category, finding_types, delay):
def api_endpoint(self):
if SUMO_DEPLOYMENT == "us1":
return "https://api.sumologic.com/api"
- elif SUMO_DEPLOYMENT in ["ca", "au", "de", "eu", "jp", "us2", "fed", "in"]:
+ elif SUMO_DEPLOYMENT in ['au', 'ca', 'de', 'eu', 'fed', 'in', 'jp', 'kr', 'us1', 'us2']:
return "https://api.%s.sumologic.com/api" % SUMO_DEPLOYMENT
else:
return 'https://%s-api.sumologic.net/api' % SUMO_DEPLOYMENT
@@ -153,10 +155,18 @@ def delete_source(self, collector_id, source):
def fetch_logs(self):
raw_messages = []
# fetch Last 10 Minutes logs
- to_time = int(time.time()) * 1000
- from_time = to_time - self.delay * 60 * 1000
+ # Get the current time
+ to_time = datetime.datetime.now()
+ from_time = to_time - datetime.timedelta(minutes=self.delay+2)
+ from_time = from_time.strftime(TIMEFORMAT)
+ to_time = to_time.strftime(TIMEFORMAT)
+ print("Fetching records")
+ print("from_time", from_time)
+ print("to_time", to_time)
+
search_query = '_sourceCategory=%s' % self.source_category
- search_job_response = self.sumo.search_job(search_query, fromTime=from_time, toTime=to_time, timeZone="IST")
+ search_job_response = self.sumo.search_job(search_query, fromTime=from_time,
+ toTime=to_time, timeZone="IST")
print("Search Jobs API success with JOB ID as %s." % search_job_response["id"])
state = "GATHERING RESULTS"
message_count = 0
@@ -180,9 +190,25 @@ def fetch_logs(self):
print("Received message count as %s." % len(raw_messages))
return raw_messages
+ def fetch_logs_with_retry(self, retries=6, delay=60):
+ for attempt in range(1, retries + 1):
+ result = self.fetch_logs()
+ if len(result) >= 3:
+ return result
+ else:
+ print(f"Attempt {attempt}")
+ if attempt == retries:
+ print("All attempts failed.")
+ return result
+ else:
+ print(f"Retrying in {delay} seconds...")
+ self.delay += 1
+ time.sleep(delay)
+
# Validate the specific findings generated
def assert_logs(self):
- messages = self.fetch_logs()
+ messages = self.fetch_logs_with_retry()
+
for finding_type in self.findings:
try:
assert any((("type" in d and d["type"] == finding_type)
@@ -300,7 +326,7 @@ def setUp(self):
self.source_name = "GuardDuty Benchmark"
self.source_category = "Labs/test/guard/duty/benchmark"
self.finding_types = ["Policy:S3/AccountBlockPublicAccessDisabled", "Policy:S3/BucketPublicAccessGranted"]
- self.delay = 7
+ self.delay = 4
# Get GuardDuty details
self.guard_duty = boto3.client('guardduty', AWS_REGION)
diff --git a/cloudwatchlogs-with-dlq/DLQLambdaCloudFormation.json b/cloudwatchlogs-with-dlq/DLQLambdaCloudFormation.json
index 823c0c3..34fa427 100644
--- a/cloudwatchlogs-with-dlq/DLQLambdaCloudFormation.json
+++ b/cloudwatchlogs-with-dlq/DLQLambdaCloudFormation.json
@@ -56,7 +56,11 @@
"eu-north-1": {"bucketname": "appdevzipfiles-eu-north-1s"},
"eu-south-1": {"bucketname": "appdevzipfiles-eu-south-1"},
"me-south-1": {"bucketname": "appdevzipfiles-me-south-1s"},
- "sa-east-1": {"bucketname": "appdevzipfiles-sa-east-1"}
+ "sa-east-1": {"bucketname": "appdevzipfiles-sa-east-1"},
+ "me-central-1": {"bucketname": "appdevzipfiles-me-central-1"},
+ "eu-central-2": {"bucketname": "appdevzipfiles-eu-central-2ss"},
+ "ap-northeast-3": {"bucketname": "appdevzipfiles-ap-northeast-3s"},
+ "ap-southeast-3": {"bucketname": "appdevzipfiles-ap-southeast-3"}
}
},
"Resources": {
@@ -206,7 +210,7 @@
"FunctionName": { "Fn::Join": [ "-", [ "SumoCWLogsLambda", { "Fn::Select" : [ "2", {"Fn::Split" : [ "/" , { "Ref": "AWS::StackId" } ]}] } ] ] },
"Code": {
"S3Bucket": { "Fn::FindInMap" : [ "RegionMap", { "Ref" : "AWS::Region" }, "bucketname"]},
- "S3Key": "cloudwatchlogs-with-dlq.zip"
+ "S3Key": "cloudwatchLogsDLQ/v1.3.0/cloudwatchlogs-with-dlq.zip"
},
"Role": {
"Fn::GetAtt": [
@@ -224,7 +228,7 @@
}
},
"Handler": "cloudwatchlogs_lambda.handler",
- "Runtime": "nodejs14.x",
+ "Runtime": "nodejs22.x",
"MemorySize": 128,
"Environment": {
"Variables": {
@@ -268,7 +272,7 @@
"FunctionName": { "Fn::Join": [ "-", [ "SumoCWProcessDLQLambda", { "Fn::Select" : [ "2", {"Fn::Split" : [ "/" , { "Ref": "AWS::StackId" } ]}] } ] ] },
"Code": {
"S3Bucket": { "Fn::FindInMap" : [ "RegionMap", { "Ref" : "AWS::Region" }, "bucketname"]},
- "S3Key": "cloudwatchlogs-with-dlq.zip"
+ "S3Key": "cloudwatchLogsDLQ/v1.3.0/cloudwatchlogs-with-dlq.zip"
},
"Role": {
"Fn::GetAtt": [
@@ -286,7 +290,7 @@
]
}
},
- "Runtime": "nodejs14.x",
+ "Runtime": "nodejs22.x",
"MemorySize": 128,
"Environment": {
"Variables": {
diff --git a/cloudwatchlogs-with-dlq/DLQLambdaCloudFormationWithSecuredEndpoint.json b/cloudwatchlogs-with-dlq/DLQLambdaCloudFormationWithSecuredEndpoint.json
index 9ed7231..9d81037 100644
--- a/cloudwatchlogs-with-dlq/DLQLambdaCloudFormationWithSecuredEndpoint.json
+++ b/cloudwatchlogs-with-dlq/DLQLambdaCloudFormationWithSecuredEndpoint.json
@@ -56,7 +56,11 @@
"eu-north-1": {"bucketname": "appdevzipfiles-eu-north-1s"},
"eu-south-1": {"bucketname": "appdevzipfiles-eu-south-1"},
"me-south-1": {"bucketname": "appdevzipfiles-me-south-1s"},
- "sa-east-1": {"bucketname": "appdevzipfiles-sa-east-1"}
+ "sa-east-1": {"bucketname": "appdevzipfiles-sa-east-1"},
+ "me-central-1": {"bucketname": "appdevzipfiles-me-central-1"},
+ "eu-central-2": {"bucketname": "appdevzipfiles-eu-central-2ss"},
+ "ap-northeast-3": {"bucketname": "appdevzipfiles-ap-northeast-3s"},
+ "ap-southeast-3": {"bucketname": "appdevzipfiles-ap-southeast-3"}
}
},
"Resources": {
@@ -248,7 +252,7 @@
"FunctionName": { "Fn::Join": [ "-", [ "SumoCWLogsLambda", { "Fn::Select" : [ "2", {"Fn::Split" : [ "/" , { "Ref": "AWS::StackId" } ]}] } ] ] },
"Code": {
"S3Bucket": { "Fn::FindInMap" : [ "RegionMap", { "Ref" : "AWS::Region" }, "bucketname"]},
- "S3Key": "cloudwatchlogs-with-dlq.zip"
+ "S3Key": "cloudwatchLogsDLQ/v1.3.0/cloudwatchlogs-with-dlq.zip"
},
"Role": {
"Fn::GetAtt": [
@@ -266,7 +270,7 @@
}
},
"Handler": "cloudwatchlogs_lambda.handler",
- "Runtime": "nodejs14.x",
+ "Runtime": "nodejs22.x",
"MemorySize": 128,
"Environment": {
"Variables": {
@@ -309,7 +313,7 @@
"FunctionName": { "Fn::Join": [ "-", [ "SumoCWProcessDLQLambda", { "Fn::Select" : [ "2", {"Fn::Split" : [ "/" , { "Ref": "AWS::StackId" } ]}] } ] ] },
"Code": {
"S3Bucket": { "Fn::FindInMap" : [ "RegionMap", { "Ref" : "AWS::Region" }, "bucketname"]},
- "S3Key": "cloudwatchlogs-with-dlq.zip"
+ "S3Key": "cloudwatchLogsDLQ/v1.3.0/cloudwatchlogs-with-dlq.zip"
},
"Role": {
"Fn::GetAtt": [
@@ -327,7 +331,7 @@
]
}
},
- "Runtime": "nodejs14.x",
+ "Runtime": "nodejs22.x",
"MemorySize": 128,
"Environment": {
"Variables": {
diff --git a/cloudwatchlogs-with-dlq/DLQProcessor.js b/cloudwatchlogs-with-dlq/DLQProcessor.js
index e28e8ce..609d39e 100644
--- a/cloudwatchlogs-with-dlq/DLQProcessor.js
+++ b/cloudwatchlogs-with-dlq/DLQProcessor.js
@@ -1,77 +1,82 @@
-var AWS = require("aws-sdk");
-var processLogsHandler = require('./cloudwatchlogs_lambda').processLogs;
-var getEndpointURL = require('./cloudwatchlogs_lambda').getEndpointURL;
-var DLQUtils = require("./sumo-dlq-function-utils").DLQUtils;
-var Messages = DLQUtils.Messages;
-var invokeLambdas = DLQUtils.invokeLambdas;
+const { processLogs: processLogsHandler, getEndpointURL } = require('./cloudwatchlogs_lambda');
+const { DLQUtils } = require("./sumo-dlq-function-utils");
+
+const { Messages, invokeLambdas } = DLQUtils;
exports.consumeMessages = async function (env, context, callback) {
- var sqs = new AWS.SQS({region: env.AWS_REGION});
- var MessagesObj = new Messages(env);
- env.SUMO_CLIENT_HEADER="dlq-aws-lambda";
+ const MessagesObj = new Messages(env);
+ env.SUMO_CLIENT_HEADER = "dlq-aws-lambda";
+
if (!env.SUMO_ENDPOINT) {
- let SUMO_ENDPOINT = await getEndpointURL();
- if (SUMO_ENDPOINT instanceof Error) {
- console.log("Error in getEndpointURL: ", SUMO_ENDPOINT);
- callback(SUMO_ENDPOINT, null);
+ try {
+ let SUMO_ENDPOINT = await getEndpointURL();
+ env.SUMO_ENDPOINT = SUMO_ENDPOINT;
+ } catch (error) {
+ console.log("Error in getEndpointURL: ", error);
+ callback(error, null);
return;
}
- env.SUMO_ENDPOINT = SUMO_ENDPOINT;
} else {
console.log("consumeMessages: Getting SUMO_ENDPOINT from env");
}
- MessagesObj.receiveMessages(10, function (err, data) {
- var messages = (data)? data.Messages: null;
- if (err) {
- callback(err);
- } else if (messages && messages.length > 0) {
- var fail_cnt = 0, msgCount = 0;
+
+ try {
+ const messages = await MessagesObj.receiveMessages(10);
+
+
+ if (messages && messages.length > 0) {
+ let fail_cnt = 0, msgCount = 0;
console.log("Messages Received", messages.length);
- for (var i = 0; i < messages.length; i++) {
- (function(idx) {
- var payload = JSON.parse(messages[idx].Body);
- var receiptHandle = messages[idx].ReceiptHandle;
+
+ for (let i = 0; i < messages.length; i++) {
+ (function (idx) {
+ const payload = JSON.parse(messages[idx].Body);
+ const receiptHandle = messages[idx].ReceiptHandle;
+
if (!(payload.awslogs && payload.awslogs.data)) {
console.log("Message does not contain awslogs or awslogs.data attributes", payload);
- //deleting msg in DLQ after injesting in sumo
- MessagesObj.deleteMessage(receiptHandle, function (err, data) {
- if (err) console.log(err, err.stack);
- });
+
+ MessagesObj.deleteMessage(receiptHandle)
+ .catch((err) => console.log(err, err.stack));
+
return;
}
- var logdata = payload.awslogs.data;
+
+ const logdata = payload.awslogs.data;
+
processLogsHandler(env, logdata, function (err, msg) {
msgCount++;
+
if (err) {
console.log(err, msg);
fail_cnt++;
} else {
- //deleting msg in DLQ after injesting in sumo
- MessagesObj.deleteMessage(receiptHandle, function (err, data) {
- if (err) console.log(err, err.stack);
- });
+ MessagesObj.deleteMessage(receiptHandle)
+ .catch((err) => console.log(err, err.stack));
}
- if (msgCount == messages.length) {
- if (fail_cnt == 0 && (parseInt(env.is_worker) === 0)) {
+
+ if (msgCount === messages.length) {
+ if (fail_cnt === 0 && parseInt(env.is_worker) === 0) {
invokeLambdas(env.AWS_REGION, parseInt(env.NUM_OF_WORKERS),
- context.functionName, '{"is_worker": "1"}', context);
+ context.functionName, '{"is_worker": "1"}', context);
}
- callback(null, (messages.length-fail_cnt) + ' success');
+
+ callback(null, `${messages.length - fail_cnt} success`);
}
});
})(i);
}
-
} else {
+
callback(null, 'success');
}
- });
+ } catch (error) {
+ callback(error);
+ }
};
exports.handler = function (event, context, callback) {
-
- var env = Object.assign({}, process.env);
- env['is_worker'] = event.is_worker || 0;
+ const env = Object.assign({}, process.env);
+ env.is_worker = event.is_worker || 0;
exports.consumeMessages(env, context, callback);
-};
-
+};
\ No newline at end of file
diff --git a/cloudwatchlogs-with-dlq/cloudwatchlogs-with-dlq.zip b/cloudwatchlogs-with-dlq/cloudwatchlogs-with-dlq.zip
new file mode 100644
index 0000000..0763e67
Binary files /dev/null and b/cloudwatchlogs-with-dlq/cloudwatchlogs-with-dlq.zip differ
diff --git a/cloudwatchlogs-with-dlq/cloudwatchlogs_lambda.js b/cloudwatchlogs-with-dlq/cloudwatchlogs_lambda.js
index ec5a6ac..f0ca7e9 100644
--- a/cloudwatchlogs-with-dlq/cloudwatchlogs_lambda.js
+++ b/cloudwatchlogs-with-dlq/cloudwatchlogs_lambda.js
@@ -19,29 +19,25 @@ var url = require('url');
var vpcutils = require('./vpcutils');
var SumoLogsClient = require('./sumo-dlq-function-utils').SumoLogsClient;
var Utils = require('./sumo-dlq-function-utils').Utils;
-const AWS = require('aws-sdk');
-const ssm = new AWS.SSM();
+
+const { SSMClient, GetParameterCommand } = require("@aws-sdk/client-ssm");
exports.getEndpointURL = async function() {
- console.log('Getting SUMO_ENDPOINT from AWS SSM Parameter Store');
- return new Promise((resolve, reject) => {
- ssm.getParameter(
- {
- Name: 'SUMO_ENDPOINT',
- WithDecryption: true
- },
- (err, data) => {
- if (err) {
- console.log(err, err.stack);
- reject(new Error('Unable to get EndpointURL from SSM: ' + err));
- } else {
- // console.log(data);
- resolve(data.Parameter.Value);
- }
- }
- );
- });
-}
+ console.log('Getting SUMO_ENDPOINT from AWS SSM Parameter Store');
+ const ssmClient = new SSMClient();
+ try {
+ const data = await ssmClient.send(
+ new GetParameterCommand({
+ Name: 'SUMO_ENDPOINT',
+ WithDecryption: true
+ })
+ );
+ return data.Parameter.Value;
+ } catch (error) {
+ console.error('Unable to get EndpointURL from SSM:', error);
+ throw new Error('Unable to get EndpointURL from SSM: ' + error);
+ }
+ }
function createRecords(config, events, awslogsData) {
var records = [];
@@ -210,4 +206,4 @@ exports.handler = function (event, context, callback) {
exports.processLogs(process.env, event.awslogs.data, callback);
-};
+};
\ No newline at end of file
diff --git a/cloudwatchlogs-with-dlq/deploy_cwl_lambda.py b/cloudwatchlogs-with-dlq/deploy_cwl_lambda.py
new file mode 100644
index 0000000..be0744d
--- /dev/null
+++ b/cloudwatchlogs-with-dlq/deploy_cwl_lambda.py
@@ -0,0 +1,133 @@
+import boto3
+import argparse
+
+
+VERSION = "v1.3.0"
+AWS_PROFILE = "prod"
+
+REGION_MAPPING = {
+ "us-east-1": "appdevzipfiles-us-east-1",
+ "us-east-2": "appdevzipfiles-us-east-2",
+ "us-west-1": "appdevzipfiles-us-west-1",
+ "us-west-2": "appdevzipfiles-us-west-2",
+ "ap-south-1": "appdevzipfiles-ap-south-1",
+ "ap-northeast-2": "appdevzipfiles-ap-northeast-2",
+ "ap-southeast-1": "appdevzipfiles-ap-southeast-1",
+ "ap-southeast-2": "appdevzipfiles-ap-southeast-2",
+ "ap-northeast-1": "appdevzipfiles-ap-northeast-1",
+ "ca-central-1": "appdevzipfiles-ca-central-1",
+ "eu-central-1": "appdevzipfiles-eu-central-1",
+ "eu-west-1": "appdevzipfiles-eu-west-1",
+ "eu-west-2": "appdevzipfiles-eu-west-2",
+ "eu-west-3": "appdevzipfiles-eu-west-3",
+ "eu-north-1": "appdevzipfiles-eu-north-1s",
+ "sa-east-1": "appdevzipfiles-sa-east-1",
+ "ap-east-1": "appdevzipfiles-ap-east-1s",
+ "af-south-1": "appdevzipfiles-af-south-1s",
+ "eu-south-1": "appdevzipfiles-eu-south-1",
+ "me-south-1": "appdevzipfiles-me-south-1s",
+ "me-central-1": "appdevzipfiles-me-central-1",
+ "eu-central-2": "appdevzipfiles-eu-central-2ss",
+ "ap-northeast-3": "appdevzipfiles-ap-northeast-3s",
+ "ap-southeast-3": "appdevzipfiles-ap-southeast-3"
+}
+
+def get_bucket_name(region):
+ return REGION_MAPPING.get(region, None)
+
+
+def create_bucket(region):
+ """Create an S3 bucket in the specified region."""
+ s3 = boto3.client("s3", region_name=region)
+ bucket_name = get_bucket_name(region)
+
+ if not bucket_name:
+ print(f"No bucket mapping found for region: {region}")
+ return
+
+ try:
+ if region == "us-east-1":
+ response = s3.create_bucket(Bucket=bucket_name)
+ else:
+ response = s3.create_bucket(
+ Bucket=bucket_name,
+ CreateBucketConfiguration={"LocationConstraint": region},
+ )
+ print(f"Bucket created: {bucket_name} in {region}", response)
+ except Exception as e:
+ print(f"Error creating bucket {bucket_name}: {e}")
+
+
+def upload_code_to_s3(region):
+ """Upload the zip file to the specified S3 bucket."""
+ filename = "cloudwatchlogs-with-dlq.zip"
+ boto3.setup_default_session(profile_name=AWS_PROFILE)
+ s3 = boto3.client("s3", region_name=region)
+ bucket_name = get_bucket_name(region)
+
+ if not bucket_name:
+ print(f"No bucket mapping found for region: {region}")
+ return
+
+ try:
+ s3.upload_file(
+ filename, bucket_name, f"cloudwatchLogsDLQ/{VERSION}/{filename}",
+ ExtraArgs={"ACL": "public-read"}
+ )
+ print(f"Uploaded {filename} to S3 bucket ({bucket_name}) in region ({region})")
+ except Exception as e:
+ print(f"Error uploading {filename} to {bucket_name}: {e}")
+
+
+def upload_code_in_multiple_regions(regions):
+ """Upload code to all or specified regions."""
+ # for region in regions:
+ # create_bucket(region)
+
+ for region in regions:
+ upload_code_to_s3(region)
+
+
+def deploy(args):
+ """Deploy production artifacts to S3."""
+ if args.region == "all":
+ upload_code_in_multiple_regions(REGION_MAPPING.keys())
+ elif args.region in REGION_MAPPING.keys():
+ upload_code_to_s3(args.region)
+ else:
+ print("Invalid region. Please provide a valid AWS region or use 'all'.")
+
+ boto3.setup_default_session(profile_name=AWS_PROFILE)
+ s3 = boto3.client("s3", region_name="us-east-1")
+ bucket_name = "appdev-cloudformation-templates"
+
+ template_files = [
+ "DLQLambdaCloudFormation.json",
+ "DLQLambdaCloudFormationWithSecuredEndpoint.json"
+ ]
+
+ for filename in template_files:
+ try:
+ s3.upload_file(
+ filename, bucket_name, filename,
+ ExtraArgs={"ACL": "public-read"}
+ )
+ print(f"Uploaded {filename} to {bucket_name}")
+ except Exception as e:
+ print(f"Error uploading {filename}: {e}")
+
+ print("Deployment Successful: All files copied to Sumocontent")
+
+
+def main():
+ parser = argparse.ArgumentParser(description="Deploy files to S3")
+ parser.add_argument(
+ "-r", "--region", type=str, help="Specify a region or use 'all' to deploy to all configured regions"
+ )
+ args = parser.parse_args()
+ deploy(args)
+
+
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/cloudwatchlogs-with-dlq/package.json b/cloudwatchlogs-with-dlq/package.json
index 6ead6d9..f693c61 100644
--- a/cloudwatchlogs-with-dlq/package.json
+++ b/cloudwatchlogs-with-dlq/package.json
@@ -4,14 +4,14 @@
"description": "Lambda function for processing messages from CloudWatch with Dead Letter Queue Support",
"main": "DLQProcessor.js",
"dependencies": {
- "jmespath": "^0.15.0",
- "lodash": "^4.17.15"
+ "jmespath": "^0.16.0",
+ "lodash": "^4.17.21"
},
"devDependencies": {},
"scripts": {
"test": "node -e 'require('./test').test()'",
"build": "rm -f cloudwatchlogs-with-dlq.zip && npm install && zip -r cloudwatchlogs-with-dlq.zip DLQProcessor.js cloudwatchlogs_lambda.js vpcutils.js package.json sumo-dlq-function-utils/ node_modules/",
- "prod_deploy": "python -c 'from test_cwl_lambda import prod_deploy;prod_deploy()'"
+ "prod_deploy": "python deploy_cwl_lambda.py -r all"
},
"author": "Himanshu Pal",
"license": "Apache-2.0"
diff --git a/cloudwatchlogs-with-dlq/requirements.txt b/cloudwatchlogs-with-dlq/requirements.txt
index 4aa09d7..c08ca11 100644
--- a/cloudwatchlogs-with-dlq/requirements.txt
+++ b/cloudwatchlogs-with-dlq/requirements.txt
@@ -1,2 +1,2 @@
-requests==2.20.0
-boto3==1.5.1
+requests==2.32.5
+boto3==1.36.11
diff --git a/cloudwatchlogs-with-dlq/sumo-dlq-function-utils/lib/dlqutils.js b/cloudwatchlogs-with-dlq/sumo-dlq-function-utils/lib/dlqutils.js
index 65accc9..c19cfed 100644
--- a/cloudwatchlogs-with-dlq/sumo-dlq-function-utils/lib/dlqutils.js
+++ b/cloudwatchlogs-with-dlq/sumo-dlq-function-utils/lib/dlqutils.js
@@ -1,46 +1,54 @@
-var AWS = require("aws-sdk");
+const { SQSClient, ReceiveMessageCommand, DeleteMessageCommand } = require("@aws-sdk/client-sqs");
+const { LambdaClient, InvokeCommand } = require("@aws-sdk/client-lambda");
-function Messages(env) {
- this.sqs = new AWS.SQS({region: env.AWS_REGION});
+class Messages {
+ constructor(env) {
+ this.sqs = new SQSClient({ region: env.AWS_REGION });
this.env = env;
-}
+ }
-Messages.prototype.receiveMessages = function (messageCount, callback) {
- var params = {
- QueueUrl: this.env.TASK_QUEUE_URL,
- MaxNumberOfMessages: messageCount
+ async receiveMessages(messageCount) {
+ const params = {
+ QueueUrl: this.env.TASK_QUEUE_URL,
+ MaxNumberOfMessages: messageCount,
};
- this.sqs.receiveMessage(params, callback);
-};
-
-Messages.prototype.deleteMessage = function (receiptHandle, callback) {
- this.sqs.deleteMessage({
- ReceiptHandle: receiptHandle,
- QueueUrl: this.env.TASK_QUEUE_URL
- }, callback);
-};
-
-function invokeLambdas(awsRegion, numOfWorkers, functionName, payload, context) {
-
- for (var i = 0; i < numOfWorkers; i++) {
- var lambda = new AWS.Lambda({
- region: awsRegion
- });
- lambda.invoke({
- InvocationType: 'Event',
- FunctionName: functionName,
- Payload: payload
- }, function(err, data) {
- if (err) {
- context.fail(err);
- } else {
- context.succeed('success');
- }
- });
+
+ const command = new ReceiveMessageCommand(params);
+ const response = await this.sqs.send(command);
+ return response.Messages || [];
+ }
+
+ async deleteMessage(receiptHandle) {
+ const params = {
+ ReceiptHandle: receiptHandle,
+ QueueUrl: this.env.TASK_QUEUE_URL,
+ };
+
+ const command = new DeleteMessageCommand(params);
+ await this.sqs.send(command);
+ }
+}
+
+async function invokeLambdas(awsRegion, numOfWorkers, functionName, payload, context) {
+ const lambda = new LambdaClient({ region: awsRegion });
+
+ for (let i = 0; i < numOfWorkers; i++) {
+ const command = new InvokeCommand({
+ InvocationType: 'Event',
+ FunctionName: functionName,
+ Payload: payload,
+ });
+
+ try {
+ await lambda.send(command);
+ context.succeed('success');
+ } catch (err) {
+ context.fail(err);
}
+ }
}
module.exports = {
- Messages: Messages,
- invokeLambdas: invokeLambdas
-};
+ Messages,
+ invokeLambdas,
+};
\ No newline at end of file
diff --git a/cloudwatchlogs-with-dlq/sumo-dlq-function-utils/lib/sumologsclient.js b/cloudwatchlogs-with-dlq/sumo-dlq-function-utils/lib/sumologsclient.js
index 6566d5b..1cb6031 100644
--- a/cloudwatchlogs-with-dlq/sumo-dlq-function-utils/lib/sumologsclient.js
+++ b/cloudwatchlogs-with-dlq/sumo-dlq-function-utils/lib/sumologsclient.js
@@ -89,7 +89,13 @@ SumoLogsClient.prototype.postToSumo = function(messages, is_compressed) {
if (obj.status === "SUCCESS") {
requestSuccessCnt += 1;
} else {
- var message = obj.error?obj.error.message:obj.response.statusMessage;
+ if (obj.error) {
+ var message = obj.error.message;
+ } else if (obj.response) {
+ var message = obj.response.statusMessage;
+ } else {
+ var message = JSON.stringify(obj);
+ }
messageErrors.push(message);
failedBucketKeys.push(obj.failedBucketKey);
}
diff --git a/cloudwatchlogs-with-dlq/sumo-dlq-function-utils/package.json b/cloudwatchlogs-with-dlq/sumo-dlq-function-utils/package.json
index fb901de..a0b8eec 100644
--- a/cloudwatchlogs-with-dlq/sumo-dlq-function-utils/package.json
+++ b/cloudwatchlogs-with-dlq/sumo-dlq-function-utils/package.json
@@ -6,7 +6,7 @@
"files": ["./lib"],
"main": "./lib/mainindex",
"dependencies": {
- "aws-sdk": "^2.160.0"
+ "aws-sdk": "^2.1633.0"
},
"devDependencies": {},
"author": "Himanshu Pal"
diff --git a/cloudwatchlogs-with-dlq/test_cwl_lambda.py b/cloudwatchlogs-with-dlq/test_cwl_lambda.py
index 3a0c9db..0d967e2 100644
--- a/cloudwatchlogs-with-dlq/test_cwl_lambda.py
+++ b/cloudwatchlogs-with-dlq/test_cwl_lambda.py
@@ -5,24 +5,100 @@
import os
import sys
import datetime
+import uuid
BUCKET_PREFIX = "appdevstore"
-
+VERSION = "v1.3.0"
+AWS_PROFILE = "default"
class TestLambda(unittest.TestCase):
TEMPLATE_KEYS_TO_REMOVE = ['SumoCWProcessDLQScheduleRule',
'SumoCWEventsInvokeLambdaPermission']
+ REGION_MAPPING = {
+ "us-east-1": "appdevstore--us-east-1",
+ "us-east-2": "appdevstore--us-east-2",
+ "us-west-1": "appdevstore--us-west-1",
+ "us-west-2": "appdevstore--us-west-2",
+ "ap-south-1": "appdevstore--ap-south-1",
+ "ap-northeast-2": "appdevstore--ap-northeast-2",
+ "ap-southeast-1": "appdevstore--ap-southeast-1",
+ "ap-southeast-2": "appdevstore--ap-southeast-2",
+ "ap-northeast-1": "appdevstore--ap-northeast-1",
+ "ca-central-1": "appdevstore--ca-central-1",
+ "eu-central-1": "appdevstore--eu-central-1",
+ "eu-west-1": "appdevstore--eu-west-1",
+ "eu-west-2": "appdevstore--eu-west-2",
+ "eu-west-3": "appdevstore--eu-west-3",
+ "eu-north-1": "appdevstore--eu-north-1s",
+ "sa-east-1": "appdevstore--sa-east-1",
+ "ap-east-1": "appdevstore--ap-east-1s",
+ "af-south-1": "appdevstore--af-south-1s",
+ "eu-south-1": "appdevstore--eu-south-1",
+ "me-south-1": "appdevstore--me-south-1s",
+ "me-central-1": "appdevstore--me-central-1",
+ "eu-central-2": "appdevstore--eu-central-2ss",
+ "ap-northeast-3": "appdevstore--ap-northeast-3s",
+ "ap-southeast-3": "appdevstore--ap-southeast-3"
+ }
+
+ def get_bucket_name(self, region):
+ return self.REGION_MAPPING[region]
+
+ @staticmethod
+ def generate_32bit_uuid():
+ return uuid.uuid4().int & 0xFFFFFFFF # Extract only the last 32 bits
+
+ def bucket_exists(self, s3, bucket_name):
+ """Check if an S3 bucket exists."""
+ try:
+ s3.head_bucket(Bucket=bucket_name)
+ return True
+ except Exception:
+ return False
+
+ def create_bucket(self, region, bucket_name):
+ """Create an S3 bucket in the specified region if it does not exist."""
+ s3 = boto3.client("s3", region_name=region)
+
+ if not bucket_name:
+ print(f"No bucket mapping found for region: {region}")
+ return
+
+ if self.bucket_exists(s3, bucket_name):
+ print(f"Bucket {bucket_name} already exists in {region}.")
+ return
+
+ try:
+ if region == "us-east-1":
+ response = s3.create_bucket(Bucket=bucket_name)
+ else:
+ response = s3.create_bucket(
+ Bucket=bucket_name,
+ CreateBucketConfiguration={"LocationConstraint": region},
+ )
+ print(f"Bucket created: {bucket_name} in {region}", response)
+ except Exception as e:
+ print(f"Error creating bucket {bucket_name}: {e}")
+
+ def upload_code_in_s3(self, region):
+ filename = 'cloudwatchlogs-with-dlq.zip'
+ boto3.setup_default_session(profile_name=AWS_PROFILE)
+ s3 = boto3.client('s3', region)
+ print("Uploading zip file %s in S3 bucket (%s) at region (%s)" % (filename, self.bucket_name, region))
+ s3.upload_file(filename, self.bucket_name, f"cloudwatchLogsDLQ/{VERSION}/{filename}")
+
def setUp(self):
self.DLQ_QUEUE_NAME = 'SumoCWDeadLetterQueue'
self.DLQ_Lambda_FnName = 'SumoCWProcessDLQLambda'
self.config = {
'AWS_REGION_NAME': os.environ.get("AWS_DEFAULT_REGION",
- "us-east-2")
+ "ap-southeast-1")
}
self.stack_name = "TestCWLStack-%s" % (
datetime.datetime.now().strftime("%d-%m-%y-%H-%M-%S"))
+ boto3.setup_default_session(profile_name=AWS_PROFILE)
self.cf = boto3.client('cloudformation',
self.config['AWS_REGION_NAME'])
self.template_name = 'DLQLambdaCloudFormation.json'
@@ -32,17 +108,27 @@ def setUp(self):
raise Exception("SumoEndPointURL environment variables are not set")
self.template_data = self._parse_template(self.template_name)
# replacing prod zipfile location to test zipfile location
- self.template_data = self.template_data.replace("appdevzipfiles", BUCKET_PREFIX)
- RUNTIME = "nodejs%s" % os.environ.get("NODE_VERSION", "10.x")
- self.template_data = self.template_data.replace("nodejs10.x", RUNTIME)
+ bucket_name = self.get_bucket_name(self.config['AWS_REGION_NAME'])
+ bucket_uuid = str(self.generate_32bit_uuid())
+ self.bucket_name = bucket_name.replace("", bucket_uuid)
+ # create new bucket
+ self.create_bucket(self.config['AWS_REGION_NAME'], self.bucket_name)
+ bucket_prefix = bucket_name.split("")[0]
+ bucket_uuid_prefix = f"{bucket_prefix}{bucket_uuid}"
+ self.template_data = self.template_data.replace("appdevzipfiles", bucket_uuid_prefix)
+ RUNTIME = "nodejs%s" % os.environ.get("NODE_VERSION", "22.x")
+ self.template_data = self.template_data.replace("nodejs22.x", RUNTIME)
+ print("self.bucket_name", self.bucket_name)
+ print("self.template_data", self.template_data)
def tearDown(self):
if self.stack_exists(self.stack_name):
self.delete_stack()
+ self.delete_s3_bucket(self.bucket_name)
def test_lambda(self):
- upload_code_in_S3(self.config['AWS_REGION_NAME'])
+ self.upload_code_in_s3(self.config['AWS_REGION_NAME'])
self.create_stack()
print("Testing Stack Creation")
self.assertTrue(self.stack_exists(self.stack_name))
@@ -51,6 +137,20 @@ def test_lambda(self):
self.invoke_lambda()
self.check_consumed_messages_count()
+ def delete_s3_bucket(self, bucket_name):
+ s3 = boto3.resource('s3')
+ bucket = s3.Bucket(bucket_name)
+
+ # Delete all objects
+ bucket.objects.all().delete()
+
+ # Delete all object versions (if versioning is enabled)
+ bucket.object_versions.all().delete()
+
+ # Delete the bucket
+ bucket.delete()
+ print(f"Bucket '{bucket_name}' and all objects deleted successfully.")
+
def stack_exists(self, stack_name):
stacks = self.cf.list_stacks()['StackSummaries']
for stack in stacks:
@@ -91,7 +191,7 @@ def delete_stack(self):
waiter.wait(StackName=self.stack_name)
def _get_dlq_url(https://github.com/self):
- if (not hasattr(self, 'dlq_queue_url')):
+ if not hasattr(self, 'dlq_queue_url'):
sqs = boto3.resource('sqs', self.config['AWS_REGION_NAME'])
queue_name = self._get_queue_name(sqs, self.DLQ_QUEUE_NAME)
queue = sqs.get_queue_by_name(QueueName=queue_name)
@@ -103,11 +203,12 @@ def insert_mock_logs_in_DLQ(self):
print("Inserting fake logs in DLQ")
dlq_queue_url = self._get_dlq_url()
sqs_client = boto3.client('sqs', self.config['AWS_REGION_NAME'])
- mock_logs = json.load(open('cwlfixtures.json'))
+ with open('cwlfixtures.json', 'r', encoding='UTF-8') as file:
+ mock_logs = json.load(file)
for log in mock_logs:
sqs_client.send_message(QueueUrl=dlq_queue_url,
MessageBody=json.dumps(log))
- sleep(15) # waiting for messages to be ingested in SQS
+ sleep(60) # waiting for messages to be ingested in SQS
self.initial_log_count = self._get_message_count()
print("Inserted %s Messages in %s" % (
self.initial_log_count, dlq_queue_url))
@@ -148,14 +249,14 @@ def check_consumed_messages_count(self):
print("Testing number of consumed messages initial: %s final: %s processed: %s" % (
self.initial_log_count, final_message_count,
self.initial_log_count - final_message_count))
- self.assertGreater(self.initial_log_count, final_message_count)
+ self.assertEqual(self.initial_log_count, final_message_count)
def _parse_template(self, template):
with open(template) as template_fileobj:
template_data = template_fileobj.read()
print("Validating cloudformation template")
self.cf.validate_template(TemplateBody=template_data)
- #removing schedulerule to prevent lambda being triggered while testing
+ #removing schedule rule to prevent lambda being triggered while testing
#becoz we are invoking lambda directly
template_data = eval(template_data)
template_data["Parameters"]["SumoEndPointURL"]["Default"] = self.sumo_endpoint_url
@@ -165,92 +266,6 @@ def _parse_template(self, template):
return template_data
-def upload_code_in_multiple_regions():
- regions = [
- "us-east-2",
- "us-east-1",
- "us-west-1",
- "us-west-2",
- "ap-south-1",
- "ap-northeast-2",
- "ap-southeast-1",
- "ap-southeast-2",
- "ap-northeast-1",
- "ca-central-1",
- # "cn-north-1",
- "eu-central-1",
- "eu-west-1",
- "eu-west-2",
- "eu-west-3",
- "sa-east-1"
- ]
-
- # for region in regions:
- # create_bucket(region)
-
- for region in regions:
- upload_code_in_S3(region)
-
-
-def get_bucket_name(region):
- return '%s-%s' % (BUCKET_PREFIX, region)
-
-
-def create_bucket(region):
- s3 = boto3.client('s3', region)
- bucket_name = get_bucket_name(region)
- if region == "us-east-1":
- response = s3.create_bucket(Bucket=bucket_name)
- else:
- response = s3.create_bucket(Bucket=bucket_name,
- CreateBucketConfiguration={
- 'LocationConstraint': region
- })
- print("Creating bucket", region, response)
-
-
-def upload_code_in_S3(region):
- filename = 'cloudwatchlogs-with-dlq.zip'
- print("Uploading zip file %s in S3 %s" % (filename, region))
- s3 = boto3.client('s3', region)
- bucket_name = get_bucket_name(region)
- s3.upload_file(filename, bucket_name, filename,
- ExtraArgs={'ACL': 'public-read'})
-
-
-def generate_fixtures(region, count):
- data = []
- sqs = boto3.client('sqs', region)
- for x in range(0, count, 10):
- response = sqs.receive_message(
- QueueUrl='https://sqs.us-east-2.amazonaws.com/456227676011/SumoCWDeadLetterQueue',
- MaxNumberOfMessages=10,
- )
- for msg in response['Messages']:
- data.append(eval(msg['Body']))
-
- return data[:count]
-
-
-def prod_deploy():
- global BUCKET_PREFIX
- BUCKET_PREFIX = 'appdevzipfiles'
- upload_code_in_multiple_regions()
- s3 = boto3.client('s3', "us-east-1")
- filename = 'DLQLambdaCloudFormation.json'
- print("Uploading template file: %s in S3" % filename)
- bucket_name = "appdev-cloudformation-templates"
- s3.upload_file(filename, bucket_name, filename,
- ExtraArgs={'ACL': 'public-read'})
- filename = 'DLQLambdaCloudFormationWithSecuredEndpoint.json'
- print("Uploading template file: %s in S3" % filename)
- s3.upload_file(filename, bucket_name, filename,
- ExtraArgs={'ACL': 'public-read'})
- print("Deployment Successfull: ALL files copied to Sumocontent")
-
if __name__ == '__main__':
- if len(sys.argv) > 1:
- BUCKET_PREFIX = sys.argv.pop()
-
unittest.main()
diff --git a/cloudwatchlogs-with-dlq/testevent.json b/cloudwatchlogs-with-dlq/testevent.json
new file mode 100644
index 0000000..aa184c1
--- /dev/null
+++ b/cloudwatchlogs-with-dlq/testevent.json
@@ -0,0 +1,5 @@
+{
+ "awslogs": {
+ "data": "H4sIAAAAAAAAAHWPwQqCQBCGX0Xm7EFtK+smZBEUgXoLCdMhFtKV3akI8d0bLYmibvPPN3wz00CJxmQnTO41whwWQRIctmEcB6sQbFC3CjW3XW8kxpOpP+OC22d1Wml1qZkQGtoMsScxaczKN3plG8zlaHIta5KqWsozoTYw3/djzwhpLwivWFGHGpAFe7DL68JlBUk+l7KSN7tCOEJ4M3/qOI49vMHj+zCKdlFqLaU2ZHV2a4Ct/an0/ivdX8oYc1UVX860fQDQiMdxRQEAAA=="
+ }
+}
diff --git a/cloudwatchlogs-with-dlq/vpcutils.js b/cloudwatchlogs-with-dlq/vpcutils.js
index c570323..4ceac28 100644
--- a/cloudwatchlogs-with-dlq/vpcutils.js
+++ b/cloudwatchlogs-with-dlq/vpcutils.js
@@ -1,7 +1,10 @@
-var find = require('lodash').find;
-var EC2 = require('aws-sdk/clients/ec2');
+// Import the required AWS SDK modules
+const { EC2Client, DescribeNetworkInterfacesCommand } = require('@aws-sdk/client-ec2');
var jmespath = require('jmespath');
-var ec2 = null;
+var find = require('lodash').find;
+// Create an instance of the EC2 client
+const ec2Client = new EC2Client({ region: process.env.AWS_REGION });
+
/*
VPC Log Format
version The VPC Flow Logs version.
@@ -50,19 +53,25 @@ function discardInternalTraffic(vpcCIDRPrefix, records) {
*
* @return `Promise` for async processing
*/
-function listNetworkInterfaces(allIPaddresses) {
- if (!ec2) {
- ec2 = new EC2({region: process.env.AWS_REGION});
- }
- var params = {
- Filters: [
- {
- Name: 'private-ip-address',
- Values: allIPaddresses
- }
- ]
- }
- return ec2.describeNetworkInterfaces(params).promise();
+async function listNetworkInterfaces(allIPaddresses) {
+ const params = {
+ Filters: [
+ {
+ Name: 'private-ip-address',
+ Values: allIPaddresses,
+ },
+ ],
+ };
+
+ const command = new DescribeNetworkInterfacesCommand(params);
+
+ try {
+ const response = await ec2Client.send(command);
+ return response;
+ } catch (err) {
+ console.log('Error in listNetworkInterfaces', err);
+ throw err;
+ }
}
/**
@@ -145,4 +154,4 @@ function includeSecurityGroupIds(records) {
module.exports = {
discardInternalTraffic: discardInternalTraffic,
includeSecurityGroupIds: includeSecurityGroupIds
-};
+};
\ No newline at end of file
diff --git a/kinesis-firehose-cloudwatch-collection/logs/KinesisFirehoseCWLogs.template.yaml b/kinesis-firehose-cloudwatch-collection/logs/KinesisFirehoseCWLogs.template.yaml
new file mode 100644
index 0000000..336a2ea
--- /dev/null
+++ b/kinesis-firehose-cloudwatch-collection/logs/KinesisFirehoseCWLogs.template.yaml
@@ -0,0 +1,348 @@
+AWSTemplateFormatVersion: '2010-09-09'
+Description: "Template to setup the AWS CloudWatch Logs collection using Amazon Kinesis Firehose and send the data to provided Sumo Logic URL."
+
+Metadata:
+ AWS::ServerlessRepo::Application:
+ Author: Sumo Logic
+ Description: This solution helps to set up AWS CloudWatch Logs collection using Amazon Kinesis Firehose and send the data to the provided Sumo Logic URL.
+ HomePageUrl: https://github.com/SumoLogic/sumologic-aws-lambda
+ Labels:
+ - lambda
+ - sumologic
+ - serverless
+ - kinesis
+ - firehose
+ - cloudwatch
+ - logs
+ Name: sumologic-kinesis-cloudwatch-logs
+ SemanticVersion: 1.0.0
+ SourceCodeUrl: https://github.com/SumoLogic/sumologic-aws-lambda/tree/master/kinesis-firehose-cloudwatch-collection/logs
+ LicenseUrl: ./LICENSE
+ ReadmeUrl: ./README.md
+ SpdxLicenseId: Apache-2.0
+
+ 'AWS::CloudFormation::Interface':
+ ParameterGroups:
+ - Label:
+ default: "1. Sumo Logic Kinesis Firehose Logs Configuration"
+ Parameters:
+ - Section1aSumoLogicKinesisLogsURL
+
+ - Label:
+ default: "2. Failed Data AWS S3 Bucket Configuration"
+ Parameters:
+ - Section2aCreateS3Bucket
+ - Section2bFailedDataS3Bucket
+
+ ParameterLabels:
+ Section1aSumoLogicKinesisLogsURL:
+ default: "Sumo Logic AWS Kinesis Firehose for Logs Source URL"
+
+ Section2aCreateS3Bucket:
+ default: "Create AWS S3 Bucket"
+ Section2bFailedDataS3Bucket:
+ default: "AWS S3 Bucket Name for Failed Data"
+
+Parameters:
+ Section1aSumoLogicKinesisLogsURL:
+ Type: String
+ Description: "Provide HTTP Source Address from AWS Kinesis Firehose for Logs source created on your Sumo Logic account."
+ AllowedPattern: ".+"
+ ConstraintDescription: "Sumo Logic AWS Kinesis Firehose for Logs Source URL can not be empty."
+
+ Section2aCreateS3Bucket:
+ Type: String
+ Default: 'Yes'
+ Description: "Yes - Create a new AWS S3 Bucket to store failed data.'.
+ No - Use an existing AWS S3 Bucket to store failed data."
+ AllowedValues:
+ - 'Yes'
+ - 'No'
+ Section2bFailedDataS3Bucket:
+ Type: String
+ Description: "Provide a unique name of AWS S3 bucket where you would like to store Failed data. In case of existing AWS S3 bucket, provide the bucket from the current AWS Account. For Logs, failed data will be stored in folder prefix as SumoLogic-Kinesis-Failed-Logs."
+ AllowedPattern: "[a-z0-9-.]{3,63}$"
+ ConstraintDescription: "3-63 characters; must contain only lowercase letters, numbers, hyphen or period. For more details - https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html"
+
+Conditions:
+ create_bucket: !Equals [ !Ref Section2aCreateS3Bucket, 'Yes' ]
+
+Mappings:
+ ArnValue:
+ us-east-1:
+ ArnValue: "aws"
+ us-east-2:
+ ArnValue: "aws"
+ us-west-1:
+ ArnValue: "aws"
+ us-west-2:
+ ArnValue: "aws"
+ af-south-1:
+ ArnValue: "aws"
+ ca-central-1:
+ ArnValue: "aws"
+ eu-central-1:
+ ArnValue: "aws"
+ eu-west-1:
+ ArnValue: "aws"
+ eu-west-2:
+ ArnValue: "aws"
+ eu-south-1:
+ ArnValue: "aws"
+ eu-west-3:
+ ArnValue: "aws"
+ eu-north-1:
+ ArnValue: "aws"
+ ap-east-1:
+ ArnValue: "aws"
+ ap-northeast-1:
+ ArnValue: "aws"
+ ap-northeast-2:
+ ArnValue: "aws"
+ ap-northeast-3:
+ ArnValue: "aws"
+ ap-southeast-1:
+ ArnValue: "aws"
+ ap-southeast-2:
+ ArnValue: "aws"
+ ap-south-1:
+ ArnValue: "aws"
+ me-south-1:
+ ArnValue: "aws"
+ sa-east-1:
+ ArnValue: "aws"
+ us-gov-west-1:
+ ArnValue: "aws-us-gov"
+ us-gov-east-1:
+ ArnValue: "aws-us-gov"
+ cn-north-1:
+ ArnValue: "aws-cn"
+ cn-northwest-1:
+ ArnValue: "aws-cn"
+
+Resources:
+ # Common Resources including creating bucket, create logs role and attach bucket policy.
+ FailedDataBucket:
+ Type: AWS::S3::Bucket
+ Condition: create_bucket
+ Metadata:
+ cfn_nag:
+ rules_to_suppress:
+ - id: W51
+ reason: "Role has been assigned permission to put logs in AWS S3 bucket."
+ - id: W35
+ reason: "Access logging not required for AWS S3 Bucket."
+ - id: W41
+ reason: "Encryption not required for AWS S3 Bucket."
+ Properties:
+ BucketName: !Ref Section2bFailedDataS3Bucket
+ AccessControl: Private
+ PublicAccessBlockConfiguration:
+ BlockPublicAcls: true
+ BlockPublicPolicy: true
+ IgnorePublicAcls: true
+ RestrictPublicBuckets: true
+
+ FirehoseLogsRole:
+ Type: AWS::IAM::Role
+ Properties:
+ AssumeRolePolicyDocument:
+ Version: '2012-10-17'
+ Statement:
+ - Sid: 'FirehoseAssumeRole'
+ Effect: Allow
+ Principal:
+ Service: firehose.amazonaws.com
+ Action: sts:AssumeRole
+ Condition:
+ StringEquals:
+ sts:ExternalId: !Ref "AWS::AccountId"
+
+ AttachBucketPolicyToFirehoseLogsRole:
+ Type: AWS::IAM::Policy
+ Properties:
+ PolicyName: "S3BucketPermissionPolicy"
+ PolicyDocument:
+ Version: '2012-10-17'
+ Statement:
+ - Effect: Allow
+ Action:
+ - s3:AbortMultipartUpload
+ - s3:GetBucketLocation
+ - s3:GetObject
+ - s3:ListBucket
+ - s3:ListBucketMultipartUploads
+ - s3:PutObject
+ Resource:
+ - !Sub
+ - "arn:${arn}:s3:::${S3Bucket}/*"
+ - arn: !FindInMap [ArnValue, !Ref "AWS::Region", ArnValue]
+ S3Bucket: !Ref Section2bFailedDataS3Bucket
+ - !Sub
+ - "arn:${arn}:s3:::${S3Bucket}"
+ - arn: !FindInMap [ArnValue, !Ref "AWS::Region", ArnValue]
+ S3Bucket: !Ref Section2bFailedDataS3Bucket
+ Roles:
+ - Ref: FirehoseLogsRole
+
+ # Resources for AWS Kinesis Logs including log group, stream, delivery stream, subscription filter, policy and role.
+ KinesisLogsLogGroup:
+ Type: AWS::Logs::LogGroup
+ Properties:
+ LogGroupName: !Sub
+ - "/aws/kinesisfirehose/Kinesis-Logs-${StackID}"
+ - StackID: !Select
+ - 0
+ - !Split
+ - "-"
+ - !Select
+ - 2
+ - !Split ["/", !Ref "AWS::StackId"]
+ RetentionInDays: 7
+
+ KinesisLogsLogStream:
+ Type: AWS::Logs::LogStream
+ Properties:
+ LogGroupName: !Ref KinesisLogsLogGroup
+ LogStreamName: "HttpEndpointDelivery"
+
+ KinesisLogsLogStreamS3:
+ Type: AWS::Logs::LogStream
+ Properties:
+ LogGroupName: !Ref KinesisLogsLogGroup
+ LogStreamName: "S3Delivery"
+
+ KinesisLogsRole:
+ Type: AWS::IAM::Role
+ Properties:
+ AssumeRolePolicyDocument:
+ Version: '2012-10-17'
+ Statement:
+ - Sid: 'LogsAssumeRole'
+ Effect: Allow
+ Principal:
+ Service: !Sub "logs.${AWS::Region}.amazonaws.com"
+ Action: sts:AssumeRole
+
+ KinesisLogsRolePolicy:
+ Type: AWS::IAM::Policy
+ Properties:
+ PolicyName: "KinesisFirehoseLogsPolicy"
+ PolicyDocument:
+ Version: '2012-10-17'
+ Statement:
+ - Effect: Allow
+ Action:
+ - firehose:PutRecord
+ - firehose:PutRecordBatch
+ - kinesis:PutRecord
+ - kinesis:PutRecordBatch
+ Resource: !Sub
+ - "arn:${arn}:firehose:${Region}:${AccountId}:*"
+ - arn: !FindInMap [ArnValue, !Ref "AWS::Region", ArnValue]
+ Region: !Ref "AWS::Region"
+ AccountId: !Ref "AWS::AccountId"
+ - Effect: Allow
+ Action:
+ - iam:PassRole
+ Resource: !Sub
+ - "arn:${arn}:iam::${AccountId}:role/${KinesisLogsRole}"
+ - arn: !FindInMap [ArnValue, !Ref "AWS::Region", ArnValue]
+ AccountId: !Ref "AWS::AccountId"
+ KinesisLogsRole: !Ref KinesisLogsRole
+ Roles:
+ - Ref: KinesisLogsRole
+
+ KinesisLogsDeliveryStream:
+ Type: AWS::KinesisFirehose::DeliveryStream
+ Properties:
+ DeliveryStreamName: !Sub
+ - "Kinesis-Logs-${StackID}"
+ - StackID: !Select
+ - 0
+ - !Split
+ - "-"
+ - !Select
+ - 2
+ - !Split ["/", !Ref "AWS::StackId"]
+ DeliveryStreamType: DirectPut
+ HttpEndpointDestinationConfiguration:
+ RoleARN: !GetAtt FirehoseLogsRole.Arn
+ EndpointConfiguration:
+ Url: !Ref Section1aSumoLogicKinesisLogsURL
+ Name: !Sub "${AWS::StackName}-sumologic-logs-endpoint"
+ RequestConfiguration:
+ ContentEncoding: GZIP
+ CloudWatchLoggingOptions:
+ Enabled: true
+ LogGroupName: !Ref KinesisLogsLogGroup
+ LogStreamName: !Ref KinesisLogsLogStream
+ BufferingHints:
+ IntervalInSeconds: 60
+ SizeInMBs: 4
+ RetryOptions:
+ DurationInSeconds: 60
+ S3BackupMode: FailedDataOnly
+ S3Configuration:
+ BucketARN: !If
+ - create_bucket
+ - !GetAtt FailedDataBucket.Arn
+ - !Sub
+ - "arn:${arn}:s3:::${S3Bucket}"
+ - arn: !FindInMap [ArnValue, !Ref "AWS::Region", ArnValue]
+ S3Bucket: !Ref Section2bFailedDataS3Bucket
+ CompressionFormat: UNCOMPRESSED
+ ErrorOutputPrefix: "SumoLogic-Kinesis-Failed-Logs/"
+ RoleARN: !GetAtt FirehoseLogsRole.Arn
+ CloudWatchLoggingOptions:
+ Enabled: true
+ LogGroupName: !Ref KinesisLogsLogGroup
+ LogStreamName: !Ref KinesisLogsLogStreamS3
+
+ KinesisLogsFirehoseRolePolicy:
+ Type: AWS::IAM::Policy
+ Properties:
+ PolicyName: "KinesisLogsLogStreamPermissionPolicy"
+ PolicyDocument:
+ Version: '2012-10-17'
+ Statement:
+ - Effect: Allow
+ Action:
+ - logs:PutLogEvents
+ Resource:
+ - !GetAtt KinesisLogsLogGroup.Arn
+ - !Sub "${KinesisLogsLogGroup.Arn}:log-stream:*"
+ - Effect: Allow
+ Action:
+ - kinesis:DescribeStream
+ - kinesis:GetShardIterator
+ - kinesis:GetRecords
+ - kinesis:ListShards
+ Resource:
+ - !GetAtt KinesisLogsDeliveryStream.Arn
+ Roles:
+ - Ref: FirehoseLogsRole
+
+ SubscriptionFilter:
+ Type: AWS::Logs::SubscriptionFilter
+ Properties:
+ RoleArn: !GetAtt KinesisLogsRole.Arn
+ LogGroupName: !Ref KinesisLogsLogGroup
+ FilterPattern: ''
+ DestinationArn: !GetAtt KinesisLogsDeliveryStream.Arn
+
+Outputs:
+ FailedDataBucketArn:
+ Description: "S3 Bucket Arn where failed deliveries will be saved"
+ Condition: create_bucket
+ Value: !GetAtt FailedDataBucket.Arn
+
+ KinesisLogsDeliveryStreamARN:
+ Description: "The ARN for your Kinesis Firehose Delivery Stream, use this as the destination when adding CloudWatch Logs subscription filters."
+ Value: !GetAtt KinesisLogsDeliveryStream.Arn
+ KinesisLogsRoleARN:
+ Description: "The ARN for your CloudWatch Logs role to write to your delivery stream, use this as the role-arn when adding CloudWatch Logs subscription filters."
+ Value: !GetAtt KinesisLogsRole.Arn
+ KinesisLogsLogGroupARN:
+ Description: "The CloudWatch log group name where kinesis stream logs will be sent."
+ Value: !Ref KinesisLogsLogGroup
\ No newline at end of file
diff --git a/kinesis-firehose-cloudwatch-collection/logs/LICENSE b/kinesis-firehose-cloudwatch-collection/logs/LICENSE
new file mode 100644
index 0000000..ba07b59
--- /dev/null
+++ b/kinesis-firehose-cloudwatch-collection/logs/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2021 Sumo Logic Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kinesis-firehose-cloudwatch-collection/logs/README.md b/kinesis-firehose-cloudwatch-collection/logs/README.md
new file mode 100644
index 0000000..b888f16
--- /dev/null
+++ b/kinesis-firehose-cloudwatch-collection/logs/README.md
@@ -0,0 +1,32 @@
+# sumologic-kinesis-cloudwatch-logs
+
+This CloudFormation template sets up the AWS resources required to send CloudWatch logs to Sumo Logic using Amazon Kinesis Firehose.
+
+For more details, see [Sumo Logic Kinesis Firehose for Logs](https://help.sumologic.com/03Send-Data/Sources/02Sources-for-Hosted-Collectors/Amazon-Web-Services/AWS_Kinesis_Firehose_for_Logs_Source).
+
+Made with ❤️ by Sumo Logic.
+
+### Deploying the CloudFormation Template
+
+1. Sign in to the AWS Management Console.
+2. Click this [URL](https://console.aws.amazon.com/cloudformation/home#/stacks/quickcreate?templateURL=https://sumologic-appdev-aws-sam-apps.s3.amazonaws.com/KinesisFirehoseCWLogs.template.yaml) to launch the latest AWS CloudFormation template.
+3. In the parameters panel, provide the following:
+
+ Section1aSumoLogicKinesisLogsURL: "Provide HTTP Source Address from AWS Kinesis Firehose for Logs source created on your Sumo Logic account."
+
+ Section2aCreateS3Bucket: "Create AWS S3 Bucket"
+ 1. Yes - Create a new AWS S3 Bucket to store failed data.
+ 2. No - Use an existing AWS S3 Bucket to store failed data.
+ Section2bFailedDataS3Bucket: "Provide a unique name of AWS S3 bucket where you would like to store Failed data.
+ In case of existing AWS S3 bucket, provide the bucket from the current AWS Account.
+ For Logs, failed data will be stored in folder prefix as SumoLogic-Kinesis-Failed-Logs."
+
+4. Click Deploy. (For a console-free alternative, see the AWS CLI sketch below.)
+
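+Alternatively, the stack can be created with the AWS CLI from the same hosted template. The snippet below is a minimal sketch: the stack name and parameter values are placeholders, and `CAPABILITY_IAM` is passed because the template creates IAM roles.
+
+```bash
+# Minimal sketch - launch the hosted template with the AWS CLI (all values below are placeholders).
+aws cloudformation create-stack \
+  --stack-name sumo-kinesis-firehose-logs \
+  --template-url https://sumologic-appdev-aws-sam-apps.s3.amazonaws.com/KinesisFirehoseCWLogs.template.yaml \
+  --capabilities CAPABILITY_IAM \
+  --parameters \
+    ParameterKey=Section1aSumoLogicKinesisLogsURL,ParameterValue="https://your-kinesis-logs-source-url/" \
+    ParameterKey=Section2aCreateS3Bucket,ParameterValue=Yes \
+    ParameterKey=Section2bFailedDataS3Bucket,ParameterValue=your-unique-failed-data-bucket
+```
+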
+## License
+
+Apache License 2.0 (Apache-2.0)
+
+## Support
+Requests & issues should be filed on GitHub: https://github.com/SumoLogic/sumologic-aws-lambda/issues
+
diff --git a/kinesis-firehose-cloudwatch-collection/logs/test/README.md b/kinesis-firehose-cloudwatch-collection/logs/test/README.md
new file mode 100644
index 0000000..f41be58
--- /dev/null
+++ b/kinesis-firehose-cloudwatch-collection/logs/test/README.md
@@ -0,0 +1,9 @@
+# Testing the Template
+
+This section explains how to run the test cases that verify condition-based resource and output creation in your CloudFormation template.
+
+Follow the steps below to run the test cases in the `testtemplate.yaml` file.
+
+1. Install the [Sumo Logic CloudFormation testing framework](https://pypi.org/project/sumologic-cfn-tester/).
+2. Run the command `sumocfntester -f testtemplate.yaml`.
+3. A `report.json` file will be generated with the result for each test case.
\ No newline at end of file
diff --git a/kinesis-firehose-cloudwatch-collection/logs/test/testtemplate.yaml b/kinesis-firehose-cloudwatch-collection/logs/test/testtemplate.yaml
new file mode 100644
index 0000000..19aa750
--- /dev/null
+++ b/kinesis-firehose-cloudwatch-collection/logs/test/testtemplate.yaml
@@ -0,0 +1,68 @@
+---
+Global:
+ TemplatePath: "../KinesisFirehoseCWLogs.template.yaml"
+ TestProjectName: BasicTestProject
+ ParallelTestsRun: 2
+ GlobalParameters:
+ Section2aCreateS3Bucket: 'No'
+  # Dummy values are provided here because these test cases verify the expected resources and outputs, not functionality.
+ Section1aSumoLogicKinesisLogsURL: "https://localhost.com/"
+ Section2bFailedDataS3Bucket: "cf-templates-1teub4jja8pis-us-east-1"
+Tests:
+ - TestName: installlogswithexistingbucket
+ Regions:
+ - us-east-1
+ Parameters:
+ Values:
+ Section2aCreateS3Bucket: 'No'
+ Skip: false
+ Assertions:
+ - AssertType: ResourceExistence
+ Assert:
+ Resources:
+ - FirehoseLogsRole
+ - AttachBucketPolicyToFirehoseLogsRole
+ - KinesisLogsLogGroup
+ - KinesisLogsLogStream
+ - KinesisLogsLogStreamS3
+ - KinesisLogsRole
+ - KinesisLogsRolePolicy
+ - KinesisLogsDeliveryStream
+ - KinesisLogsFirehoseRolePolicy
+ - SubscriptionFilter
+ - AssertType: OutputsCheck
+ Assert:
+ Outputs:
+ - KinesisLogsDeliveryStreamARN
+ - KinesisLogsRoleARN
+ - KinesisLogsLogGroupARN
+ - TestName: installlogswithnewbucket
+ Regions:
+ - us-east-1
+ Parameters:
+ Values:
+ Section2aCreateS3Bucket: 'Yes'
+ Section2bFailedDataS3Bucket: 'kinesis-firehose-testing-23173612910'
+ Skip: false
+ Assertions:
+ - AssertType: ResourceExistence
+ Assert:
+ Resources:
+ - FailedDataBucket
+ - FirehoseLogsRole
+ - AttachBucketPolicyToFirehoseLogsRole
+ - KinesisLogsLogGroup
+ - KinesisLogsLogStream
+ - KinesisLogsLogStreamS3
+ - KinesisLogsRole
+ - KinesisLogsRolePolicy
+ - KinesisLogsDeliveryStream
+ - KinesisLogsFirehoseRolePolicy
+ - SubscriptionFilter
+ - AssertType: OutputsCheck
+ Assert:
+ Outputs:
+ - FailedDataBucketArn
+ - KinesisLogsDeliveryStreamARN
+ - KinesisLogsRoleARN
+ - KinesisLogsLogGroupARN
\ No newline at end of file
diff --git a/kinesis-firehose-cloudwatch-collection/metrics/KinesisFirehoseCWMetrics.template.yaml b/kinesis-firehose-cloudwatch-collection/metrics/KinesisFirehoseCWMetrics.template.yaml
new file mode 100644
index 0000000..df1c0f8
--- /dev/null
+++ b/kinesis-firehose-cloudwatch-collection/metrics/KinesisFirehoseCWMetrics.template.yaml
@@ -0,0 +1,438 @@
+AWSTemplateFormatVersion: '2010-09-09'
+Description: "Template to set up AWS CloudWatch Metrics collection using Amazon Kinesis Firehose and send the data to the provided Sumo Logic URL."
+
+Metadata:
+ AWS::ServerlessRepo::Application:
+ Author: Sumo Logic
+    Description: This solution helps to set up AWS CloudWatch Metrics collection using Amazon Kinesis Firehose and send the data to the provided Sumo Logic URL.
+ HomePageUrl: https://github.com/SumoLogic/sumologic-aws-lambda
+ Labels:
+ - lambda
+ - sumologic
+ - serverless
+ - kinesis
+ - firehose
+ - cloudwatch
+ - metrics
+ Name: sumologic-kinesis-cloudwatch-metrics
+ SemanticVersion: 1.0.1
+ SourceCodeUrl: https://github.com/SumoLogic/sumologic-aws-lambda/tree/master/kinesis-firehose-cloudwatch-collection/metrics
+ LicenseUrl: ./LICENSE
+ ReadmeUrl: ./README.md
+ SpdxLicenseId: Apache-2.0
+
+ 'AWS::CloudFormation::Interface':
+ ParameterGroups:
+ - Label:
+ default: "1. Sumo Logic Kinesis Firehose Metrics Configuration"
+ Parameters:
+ - Section1aSumoLogicKinesisMetricsURL
+ - Section1bNamespaceFilter
+
+ - Label:
+ default: "2. Failed Data AWS S3 Bucket Configuration"
+ Parameters:
+ - Section2aCreateS3Bucket
+ - Section2bFailedDataS3Bucket
+
+ ParameterLabels:
+ Section1aSumoLogicKinesisMetricsURL:
+ default: "Sumo Logic AWS Kinesis Firehose for Metrics Source URL"
+ Section1bNamespaceFilter:
+ default: "Select Namespaces to collect AWS CloudWatch Metrics from"
+
+ Section2aCreateS3Bucket:
+ default: "Create AWS S3 Bucket"
+ Section2bFailedDataS3Bucket:
+ default: "AWS S3 Bucket Name for Failed Data"
+
+Parameters:
+ Section1aSumoLogicKinesisMetricsURL:
+ Type: String
+ Description: "Provide HTTP Source Address from AWS Kinesis Firehose for Metrics source created on your Sumo Logic account."
+ AllowedPattern: ".+"
+ ConstraintDescription: "Sumo Logic AWS Kinesis Firehose for Metrics Source URL can not be empty."
+ Section1bNamespaceFilter:
+ Default: ""
+    Description: "Enter a comma-delimited list of namespaces from which you want to collect AWS CloudWatch metrics, for example: AWS/ELB, AWS/DynamoDB. To collect metrics from all namespaces, leave this field blank (the default)."
+ Type: String
+
+ Section2aCreateS3Bucket:
+ Type: String
+ Default: 'Yes'
+    Description: "Yes - Create a new AWS S3 Bucket to store failed data.
+                  No - Use an existing AWS S3 Bucket to store failed data."
+ AllowedValues:
+ - 'Yes'
+ - 'No'
+ Section2bFailedDataS3Bucket:
+ Type: String
+ Description: "Provide a unique name of AWS S3 bucket where you would like to store Failed data. In case of existing AWS S3 bucket, provide the bucket from the current AWS Account. For Metrics, failed data will be stored in folder prefix as SumoLogic-Kinesis-Failed-Metrics."
+ AllowedPattern: "[a-z0-9-.]{3,63}$"
+ ConstraintDescription: "3-63 characters; must contain only lowercase letters, numbers, hyphen or period. For more details - https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html"
+
+Conditions:
+ create_bucket: !Equals [ !Ref Section2aCreateS3Bucket, 'Yes' ]
+
+Mappings:
+ ArnValue:
+ us-east-1:
+ ArnValue: "aws"
+ us-east-2:
+ ArnValue: "aws"
+ us-west-1:
+ ArnValue: "aws"
+ us-west-2:
+ ArnValue: "aws"
+ af-south-1:
+ ArnValue: "aws"
+ ca-central-1:
+ ArnValue: "aws"
+ eu-central-1:
+ ArnValue: "aws"
+ eu-west-1:
+ ArnValue: "aws"
+ eu-west-2:
+ ArnValue: "aws"
+ eu-south-1:
+ ArnValue: "aws"
+ eu-west-3:
+ ArnValue: "aws"
+ eu-north-1:
+ ArnValue: "aws"
+ ap-east-1:
+ ArnValue: "aws"
+ ap-northeast-1:
+ ArnValue: "aws"
+ ap-northeast-2:
+ ArnValue: "aws"
+ ap-northeast-3:
+ ArnValue: "aws"
+ ap-southeast-1:
+ ArnValue: "aws"
+ ap-southeast-2:
+ ArnValue: "aws"
+ ap-south-1:
+ ArnValue: "aws"
+ me-south-1:
+ ArnValue: "aws"
+ sa-east-1:
+ ArnValue: "aws"
+ us-gov-west-1:
+ ArnValue: "aws-us-gov"
+ us-gov-east-1:
+ ArnValue: "aws-us-gov"
+ cn-north-1:
+ ArnValue: "aws-cn"
+ cn-northwest-1:
+ ArnValue: "aws-cn"
+
+Resources:
+  # Common resources: the failed-data S3 bucket, the Firehose role, and the bucket policy attachment.
+ FailedDataBucket:
+ Type: AWS::S3::Bucket
+ Condition: create_bucket
+ Metadata:
+ cfn_nag:
+ rules_to_suppress:
+ - id: W51
+ reason: "Role has been assigned permission to put logs in AWS S3 bucket."
+ - id: W35
+ reason: "Access logging not required for AWS S3 Bucket."
+ - id: W41
+ reason: "Encryption not required for AWS S3 Bucket."
+ Properties:
+ BucketName: !Ref Section2bFailedDataS3Bucket
+ AccessControl: Private
+ PublicAccessBlockConfiguration:
+ BlockPublicAcls: true
+ BlockPublicPolicy: true
+ IgnorePublicAcls: true
+ RestrictPublicBuckets: true
+
+ FirehoseLogsRole:
+ Type: AWS::IAM::Role
+ Properties:
+ AssumeRolePolicyDocument:
+ Version: '2012-10-17'
+ Statement:
+ - Sid: 'FirehoseAssumeRole'
+ Effect: Allow
+ Principal:
+ Service: firehose.amazonaws.com
+ Action: sts:AssumeRole
+ Condition:
+ StringEquals:
+ sts:ExternalId: !Ref "AWS::AccountId"
+
+ AttachBucketPolicyToFirehoseLogsRole:
+ Type: AWS::IAM::Policy
+ Properties:
+ PolicyName: "S3BucketPermissionPolicy"
+ PolicyDocument:
+ Version: '2012-10-17'
+ Statement:
+ - Effect: Allow
+ Action:
+ - s3:AbortMultipartUpload
+ - s3:GetBucketLocation
+ - s3:GetObject
+ - s3:ListBucket
+ - s3:ListBucketMultipartUploads
+ - s3:PutObject
+ Resource:
+ - !Sub
+ - "arn:${arn}:s3:::${S3Bucket}/*"
+ - arn: !FindInMap [ ArnValue, !Ref "AWS::Region", ArnValue ]
+ S3Bucket: !Ref Section2bFailedDataS3Bucket
+ - !Sub
+ - "arn:${arn}:s3:::${S3Bucket}"
+ - arn: !FindInMap [ ArnValue, !Ref "AWS::Region", ArnValue ]
+ S3Bucket: !Ref Section2bFailedDataS3Bucket
+ Roles:
+ - Ref: FirehoseLogsRole
+
+  # Resources for AWS Kinesis metrics collection: log group, log streams, delivery stream, CloudWatch metric stream, policies, and roles.
+ KinesisMetricsLogGroup:
+ Type: AWS::Logs::LogGroup
+ Properties:
+ LogGroupName: !Sub
+ - "/aws/kinesisfirehose/Kinesis-Metrics-${StackID}"
+ - StackID: !Select
+ - 0
+ - !Split
+ - "-"
+ - !Select
+ - 2
+ - !Split [ "/", !Ref "AWS::StackId" ]
+ RetentionInDays: 7
+
+ KinesisMetricsLogStream:
+ Type: AWS::Logs::LogStream
+ Properties:
+ LogGroupName: !Ref KinesisMetricsLogGroup
+ LogStreamName: "HttpEndpointDelivery"
+
+ KinesisMetricsLogStreamS3:
+ Type: AWS::Logs::LogStream
+ Properties:
+ LogGroupName: !Ref KinesisMetricsLogGroup
+ LogStreamName: "S3Delivery"
+
+ KinesisMetricsRole:
+ Type: AWS::IAM::Role
+ Properties:
+ AssumeRolePolicyDocument:
+ Version: '2012-10-17'
+ Statement:
+ - Sid: 'MetricsAssumeRole'
+ Effect: Allow
+ Principal:
+ Service: "streams.metrics.cloudwatch.amazonaws.com"
+ Action: sts:AssumeRole
+
+ KinesisMetricsRolePolicy:
+ Type: AWS::IAM::Policy
+ Properties:
+ PolicyName: "KinesisFirehoseMetricsPolicy"
+ PolicyDocument:
+ Version: '2012-10-17'
+ Statement:
+ - Effect: Allow
+ Action:
+ - firehose:PutRecord
+ - firehose:PutRecordBatch
+ Resource: !Sub
+ - "arn:${arn}:firehose:${Region}:${AccountId}:*"
+ - arn: !FindInMap [ ArnValue, !Ref "AWS::Region", ArnValue ]
+ Region: !Ref "AWS::Region"
+ AccountId: !Ref "AWS::AccountId"
+ - Effect: Allow
+ Action:
+ - iam:PassRole
+ Resource: !Sub
+ - "arn:${arn}:iam::${AccountId}:role/${KinesisMetricsRole}"
+ - arn: !FindInMap [ ArnValue, !Ref "AWS::Region", ArnValue ]
+ AccountId: !Ref "AWS::AccountId"
+ KinesisMetricsRole: !Ref KinesisMetricsRole
+ Roles:
+ - Ref: KinesisMetricsRole
+
+ KinesisMetricsDeliveryStream:
+ Type: AWS::KinesisFirehose::DeliveryStream
+ Properties:
+ DeliveryStreamName: !Sub
+ - "Kinesis-Metrics-${StackID}"
+ - StackID: !Select
+ - 0
+ - !Split
+ - "-"
+ - !Select
+ - 2
+ - !Split [ "/", !Ref "AWS::StackId" ]
+ DeliveryStreamType: DirectPut
+ HttpEndpointDestinationConfiguration:
+ RoleARN: !GetAtt FirehoseLogsRole.Arn
+ EndpointConfiguration:
+ Url: !Ref Section1aSumoLogicKinesisMetricsURL
+ Name: !Sub "${AWS::StackName}-sumologic-metrics-endpoint"
+ RequestConfiguration:
+ ContentEncoding: GZIP
+ CloudWatchLoggingOptions:
+ Enabled: true
+ LogGroupName: !Ref KinesisMetricsLogGroup
+ LogStreamName: !Ref KinesisMetricsLogStream
+ BufferingHints:
+ IntervalInSeconds: 60
+ SizeInMBs: 1
+ RetryOptions:
+ DurationInSeconds: 60
+ S3BackupMode: FailedDataOnly
+ S3Configuration:
+ BucketARN: !If
+ - create_bucket
+ - !GetAtt FailedDataBucket.Arn
+ - !Sub
+ - "arn:${arn}:s3:::${S3Bucket}"
+ - arn: !FindInMap [ ArnValue, !Ref "AWS::Region", ArnValue ]
+ S3Bucket: !Ref Section2bFailedDataS3Bucket
+ CompressionFormat: UNCOMPRESSED
+ ErrorOutputPrefix: "SumoLogic-Kinesis-Failed-Metrics/"
+ RoleARN: !GetAtt FirehoseLogsRole.Arn
+ CloudWatchLoggingOptions:
+ Enabled: true
+ LogGroupName: !Ref KinesisMetricsLogGroup
+ LogStreamName: !Ref KinesisMetricsLogStreamS3
+
+ KinesisMetricsFirehoseRolePolicy:
+ Type: AWS::IAM::Policy
+ Properties:
+ PolicyName: "KinesisMetricsLogStreamPermissionPolicy"
+ PolicyDocument:
+ Version: '2012-10-17'
+ Statement:
+ - Effect: Allow
+ Action:
+ - logs:PutLogEvents
+ Resource:
+ - !GetAtt KinesisMetricsLogGroup.Arn
+ - !Sub "${KinesisMetricsLogGroup.Arn}:log-stream:*"
+ - Effect: Allow
+ Action:
+ - kinesis:DescribeStream
+ - kinesis:GetShardIterator
+ - kinesis:GetRecords
+ - kinesis:ListShards
+ Resource:
+ - !GetAtt KinesisMetricsDeliveryStream.Arn
+ Roles:
+ - Ref: FirehoseLogsRole
+
+ KinesisMetricsCloudWatchMetricsStreamLambdaRole:
+ Type: AWS::IAM::Role
+ Properties:
+ AssumeRolePolicyDocument:
+ Version: '2012-10-17'
+ Statement:
+ - Effect: Allow
+ Principal:
+ Service: lambda.amazonaws.com
+ Action: sts:AssumeRole
+ Condition: { }
+ Path: /
+ Policies:
+ - PolicyName: inline-policy
+ PolicyDocument:
+ Version: 2012-10-17
+ Statement:
+ - Effect: Allow
+ Action:
+ - cloudwatch:PutMetricStream
+ - cloudwatch:DeleteMetricStream
+ Resource: !Sub
+ - "arn:${arn}:cloudwatch:${Region}:${AccountId}:metric-stream/*"
+ - arn: !FindInMap [ ArnValue, !Ref "AWS::Region", ArnValue ]
+ Region: !Ref "AWS::Region"
+ AccountId: !Ref "AWS::AccountId"
+ - Effect: Allow
+ Action:
+ - iam:PassRole
+ Resource: !Sub
+ - "arn:${arn}:iam::${AccountId}:role/${KinesisMetricsRole}"
+ - arn: !FindInMap [ ArnValue, !Ref "AWS::Region", ArnValue ]
+ AccountId: !Ref "AWS::AccountId"
+ KinesisMetricsRole: !Ref KinesisMetricsRole
+ ManagedPolicyArns:
+ - arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole
+
+ KinesisMetricsCloudWatchMetricsStreamLambda:
+ Type: AWS::Lambda::Function
+ Properties:
+      Description: "This Lambda function is used to create the CloudWatch Metric Stream."
+ Handler: index.handler
+ Code:
+ ZipFile: |
+ import sys
+ import subprocess
+
+ subprocess.check_call([sys.executable, "-m", "pip", "install", '--upgrade', 'boto3', '--target', '/tmp/'])
+ sys.path.insert(0, '/tmp/')
+
+ import boto3
+ import cfnresponse
+
+
+ def handler(event, context):
+ response_data = {}
+ physical_id = ""
+ namespaces_list = []
+ try:
+ client = boto3.client('cloudwatch')
+ properties = event['ResourceProperties']
+ physical_id = properties['Name']
+ print("Received %s Event Type." % event['RequestType'])
+ if event['RequestType'] == "Delete":
+ response_data = client.delete_metric_stream(Name=physical_id)
+ else:
+ namespaces = properties["Namespaces"]
+ if namespaces:
+ namespaces = namespaces.split(",")
+ for namespace in namespaces:
+ namespaces_list.append({'Namespace': namespace.strip()})
+ response_data = client.put_metric_stream(Name=physical_id, FirehoseArn=properties['FirehoseArn'],
+ RoleArn=properties['RoleArn'],
+ OutputFormat=properties['OutputFormat'],
+ IncludeFilters=namespaces_list)
+              except Exception as e:
+                  print(e)
+                  cfnresponse.send(event, context, cfnresponse.FAILED, {"Error": str(e)}, physical_id)
+                  # Return after signaling failure so a SUCCESS response is not also sent.
+                  return
+              cfnresponse.send(event, context, cfnresponse.SUCCESS, response_data, physical_id)
+ Runtime: python3.13
+ Timeout: 300
+ Role: !GetAtt KinesisMetricsCloudWatchMetricsStreamLambdaRole.Arn
+
+ KinesisMetricsCloudWatchMetricsStream:
+ Type: AWS::CloudFormation::CustomResource
+ Properties:
+ ServiceToken: !GetAtt KinesisMetricsCloudWatchMetricsStreamLambda.Arn
+ Name: !Ref "AWS::StackName"
+ OutputFormat: 'opentelemetry0.7'
+ FirehoseArn: !GetAtt KinesisMetricsDeliveryStream.Arn
+ RoleArn: !GetAtt KinesisMetricsRole.Arn
+ Namespaces: !Ref Section1bNamespaceFilter
+
+Outputs:
+ FailedDataBucketArn:
+ Description: "S3 Bucket Arn where failed deliveries will be saved"
+ Condition: create_bucket
+ Value: !GetAtt FailedDataBucket.Arn
+
+ KinesisMetricsDeliveryStreamARN:
+    Description: "The ARN of the Kinesis Firehose delivery stream; use this as the Firehose ARN when creating CloudWatch metric streams."
+ Value: !GetAtt KinesisMetricsDeliveryStream.Arn
+ KinesisMetricsRoleARN:
+    Description: "The ARN of the role that CloudWatch metric streams assume to write to the delivery stream; use this as the role ARN when creating CloudWatch metric streams."
+ Value: !GetAtt KinesisMetricsRole.Arn
+ KinesisMetricsLogGroupARN:
+    Description: "The CloudWatch log group name where Kinesis Firehose delivery logs will be sent."
+ Value: !Ref KinesisMetricsLogGroup
diff --git a/kinesis-firehose-cloudwatch-collection/metrics/LICENSE b/kinesis-firehose-cloudwatch-collection/metrics/LICENSE
new file mode 100644
index 0000000..ba07b59
--- /dev/null
+++ b/kinesis-firehose-cloudwatch-collection/metrics/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2021 Sumo Logic Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kinesis-firehose-cloudwatch-collection/metrics/README.md b/kinesis-firehose-cloudwatch-collection/metrics/README.md
new file mode 100644
index 0000000..c2a5656
--- /dev/null
+++ b/kinesis-firehose-cloudwatch-collection/metrics/README.md
@@ -0,0 +1,33 @@
+# sumologic-kinesis-cloudwatch-metrics
+
+This CloudFormation template sets up the AWS resources required to send CloudWatch metrics to Sumo Logic using Amazon Kinesis Firehose.
+
+For more details, see [Sumo Logic Kinesis Firehose for Metrics](https://help.sumologic.com/03Send-Data/Sources/02Sources-for-Hosted-Collectors/Amazon-Web-Services/AWS_Kinesis_Firehose_for_Metrics_Source).
+
+Made with ❤️ by Sumo Logic.
+
+### Deploying the CloudFormation Template
+
+1. Sign in to the AWS Management Console.
+2. Click this [URL](https://console.aws.amazon.com/cloudformation/home#/stacks/quickcreate?templateURL=https://sumologic-appdev-aws-sam-apps.s3.amazonaws.com/KinesisFirehoseCWMetrics.template.yaml) to launch the latest AWS CloudFormation template.
+3. In the parameters panel, provide the following:
+
+ Section1aSumoLogicKinesisMetricsURL: "Provide HTTP Source Address from AWS Kinesis Firehose for Metrics source created on your Sumo Logic account."
+    Section1bNamespaceFilter: "Enter a comma-delimited list of namespaces from which you want to collect AWS CloudWatch metrics, for example: AWS/ELB, AWS/DynamoDB.
+     To collect metrics from all namespaces, leave this field blank (the default)."
+
+ Section2aCreateS3Bucket: "Create AWS S3 Bucket"
+ 1. Yes - Create a new AWS S3 Bucket to store failed data.
+ 2. No - Use an existing AWS S3 Bucket to store failed data.
+ Section2bFailedDataS3Bucket: "Provide a unique name of AWS S3 bucket where you would like to store Failed data.
+ In case of existing AWS S3 bucket, provide the bucket from the current AWS Account.
+ For Metrics, failed data will be stored in folder prefix as SumoLogic-Kinesis-Failed-Metrics."
+4. Click Deploy. (After the stack is created, you can verify the metric stream with the AWS CLI sketch below.)
+
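+Once the stack has been created, one way to confirm that the CloudWatch metric stream exists is with the AWS CLI. The sketch below assumes the template's default behaviour of naming the metric stream after the CloudFormation stack; the stack name shown is a placeholder.
+
+```bash
+# Minimal sketch - verify the metric stream created by the stack (stack name is a placeholder).
+aws cloudwatch get-metric-stream --name sumo-kinesis-firehose-metrics
+
+# Or list all metric streams in the region and look for one named after your stack.
+aws cloudwatch list-metric-streams
+```
+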
+## License
+
+Apache License 2.0 (Apache-2.0)
+
+## Support
+Requests & issues should be filed on GitHub: https://github.com/SumoLogic/sumologic-aws-lambda/issues
+
diff --git a/kinesis-firehose-cloudwatch-collection/metrics/test/README.md b/kinesis-firehose-cloudwatch-collection/metrics/test/README.md
new file mode 100644
index 0000000..f41be58
--- /dev/null
+++ b/kinesis-firehose-cloudwatch-collection/metrics/test/README.md
@@ -0,0 +1,9 @@
+# Testing the Template
+
+This section explains how to run the test cases that verify condition-based resource and output creation in your CloudFormation template.
+
+Follow the steps below to run the test cases in the `testtemplate.yaml` file.
+
+1. Install the [Sumo Logic CloudFormation testing framework](https://pypi.org/project/sumologic-cfn-tester/).
+2. Run the command `sumocfntester -f testtemplate.yaml`.
+3. A `report.json` file will be generated with the result for each test case.
\ No newline at end of file
diff --git a/kinesis-firehose-cloudwatch-collection/metrics/test/testtemplate.yaml b/kinesis-firehose-cloudwatch-collection/metrics/test/testtemplate.yaml
new file mode 100644
index 0000000..f621d92
--- /dev/null
+++ b/kinesis-firehose-cloudwatch-collection/metrics/test/testtemplate.yaml
@@ -0,0 +1,72 @@
+---
+Global:
+ TemplatePath: "../KinesisFirehoseCWMetrics.template.yaml"
+ TestProjectName: BasicTestProject
+ ParallelTestsRun: 3
+ GlobalParameters:
+ Section2aCreateS3Bucket: 'No'
+  # Dummy values are provided here because these test cases verify the expected resources and outputs, not functionality.
+ Section1aSumoLogicKinesisMetricsURL: "https://localhost.com/"
+ Section2bFailedDataS3Bucket: "cf-templates-1teub4jja8pis-us-east-1"
+Tests:
+ - TestName: installMetricswithexistingbucket
+ Regions:
+ - us-east-1
+ Parameters:
+ Values:
+ Section2aCreateS3Bucket: 'No'
+ Skip: false
+ Assertions:
+ - AssertType: ResourceExistence
+ Assert:
+ Resources:
+ - FirehoseLogsRole
+ - AttachBucketPolicyToFirehoseLogsRole
+ - KinesisMetricsLogGroup
+ - KinesisMetricsLogStream
+ - KinesisMetricsLogStreamS3
+ - KinesisMetricsRole
+ - KinesisMetricsRolePolicy
+ - KinesisMetricsDeliveryStream
+ - KinesisMetricsFirehoseRolePolicy
+ - KinesisMetricsCloudWatchMetricsStream
+ - KinesisMetricsCloudWatchMetricsStreamLambdaRole
+ - KinesisMetricsCloudWatchMetricsStreamLambda
+ - AssertType: OutputsCheck
+ Assert:
+ Outputs:
+ - KinesisMetricsDeliveryStreamARN
+ - KinesisMetricsRoleARN
+ - KinesisMetricsLogGroupARN
+ - TestName: installmetricswithnewbucket
+ Regions:
+ - us-east-1
+ Parameters:
+ Values:
+ Section2aCreateS3Bucket: 'Yes'
+ Section2bFailedDataS3Bucket: 'kinesis-firehose-testing-792073615729'
+ Skip: false
+ Assertions:
+ - AssertType: ResourceExistence
+ Assert:
+ Resources:
+ - FailedDataBucket
+ - FirehoseLogsRole
+ - AttachBucketPolicyToFirehoseLogsRole
+ - KinesisMetricsLogGroup
+ - KinesisMetricsLogStream
+ - KinesisMetricsLogStreamS3
+ - KinesisMetricsRole
+ - KinesisMetricsRolePolicy
+ - KinesisMetricsDeliveryStream
+ - KinesisMetricsFirehoseRolePolicy
+ - KinesisMetricsCloudWatchMetricsStream
+ - KinesisMetricsCloudWatchMetricsStreamLambdaRole
+ - KinesisMetricsCloudWatchMetricsStreamLambda
+ - AssertType: OutputsCheck
+ Assert:
+ Outputs:
+ - FailedDataBucketArn
+ - KinesisMetricsDeliveryStreamARN
+ - KinesisMetricsRoleARN
+ - KinesisMetricsLogGroupARN
\ No newline at end of file
diff --git a/kinesis-firehose-cloudwatch-collection/sam/publishsam.sh b/kinesis-firehose-cloudwatch-collection/sam/publishsam.sh
new file mode 100755
index 0000000..1dc53d0
--- /dev/null
+++ b/kinesis-firehose-cloudwatch-collection/sam/publishsam.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+export AWS_REGION="us-east-1"
+# IMPORTANT - Update the profile where you would like to deploy SAM app.
+export AWS_PROFILE="personal"
+
+# IMPORTANT - Update the bucket value based on the AWS account you are deploying the SAM app in.
+if [[ "${AWS_PROFILE}" == "personal" ]]
+then
+ SAM_S3_BUCKET=""
+else
+ SAM_S3_BUCKET="appdevstore"
+fi
+
+# define all application names that need to be published.
+app_names=(
+ "KinesisFirehoseCWLogs:logs" "KinesisFirehoseCWMetrics:metrics"
+)
+
+sam --version
+
+# Regex to deploy only expected templates.
+match_case="KinesisFirehoseCWMetrics"
+
+for app_name in "${app_names[@]}"
+do
+ KEY="${app_name%%:*}"
+ VALUE="${app_name##*:}"
+
+ template_path="${KEY}.template.yaml"
+ packaged_path="packaged.yaml"
+
+ if [[ "${KEY}" == *"${match_case}"* ]]; then
+ # Grep Version from the SAM Template.
+ cd ../"${VALUE}" || exit
+
+ export version=`grep AWS::ServerlessRepo::Application: ${template_path} -A 20 | grep SemanticVersion | cut -d ':' -f 2 | xargs`
+ echo "Package and publish the Template file ${KEY} with version ${version}."
+
+ sam validate -t ${template_path}
+
+ sam build -t ${template_path}
+
+ sam package --profile ${AWS_PROFILE} --template-file .aws-sam/build/template.yaml --s3-bucket ${SAM_S3_BUCKET} --output-template-file ${packaged_path} \
+ --s3-prefix "${KEY}/v${version}" --region $AWS_REGION
+
+ sam publish --template ${packaged_path} --region ${AWS_REGION} --semantic-version ${version} --profile ${AWS_PROFILE}
+ echo "Publish done"
+ fi
+ cd ../sam || exit
+done
+
diff --git a/kinesis/README.md b/kinesis/README.md
index a80193f..4bd5bc3 100644
--- a/kinesis/README.md
+++ b/kinesis/README.md
@@ -1,6 +1,10 @@
-===========================================
-Kinesis to Sumo Logic
-===========================================
+
+# Kinesis to Sumo Logic
+
+## Warning: This Lambda function is no longer the recommended solution. The recommended solution is to use [AWS Kinesis Firehose for Logs](https://help-opensource.sumologic.com/docs/send-data/hosted-collectors/amazon-aws/aws-kinesis-firehose-logs-source/)
+
+
+
This function is invoked by AWS Lambda after it detects new records in Kinesis stream. The received collection of events are decompressed, transformed and send to Sumo Logic HTTP source endpoint.
Files
diff --git a/loggroup-lambda-connector/Readme.md b/loggroup-lambda-connector/Readme.md
index d23afe4..2041cff 100644
--- a/loggroup-lambda-connector/Readme.md
+++ b/loggroup-lambda-connector/Readme.md
@@ -1,6 +1,11 @@
# SumoLogic LogGroup Connector
This is used to automatically subscribe newly created and existing Cloudwatch LogGroups to a Lambda function.
+> **Note:**
+> For existing CloudWatch LogGroups, a Lambda function can subscribe to up to 65,000 LogGroups.
+> If the number of LogGroups exceeds 65,000, you can request to disable Lambda recursive loop detection by [contacting AWS Support](https://repost.aws/knowledge-center/aws-phone-support).
+
+
Made with ❤️ by Sumo Logic. Available on the [AWS Serverless Application Repository](https://aws.amazon.com/serverless)
### Deploying the SAM Application
@@ -20,13 +25,30 @@ Made with ❤️ by Sumo Logic. Available on the [AWS Serverless Application Rep
### Configuring Lambda
-It has two environment variables
-
-**LOG_GROUP_PATTERN**: This is a javascript regex to filter out loggroups. Only loggroups which match this pattern will be subscribed to the lambda function.Do not use '/' while writing the pattern and it is case insensitive.
-
-```
- Test - will match testlogroup, logtestgroup and LogGroupTest
+#### Environment variables
+
+**LOG_GROUP_PATTERN**: This JavaScript regex is used to filter log groups. Only log groups that match this pattern will be subscribed to the Lambda function. The default value is `Test`, which will match log groups like `testlogroup`, `logtestgroup`, and `LogGroupTest`.
+
+##### Use Cases and Their Regex Pattern Examples
+
+| Case Description | Regex Pattern Example |
+|------------------------------------------------------------------------|-------------------------------------|
+| To subscribe all log groups | `/*` or (leave empty) |
+| To subscribe all log group paths only | `/` |
+| To subscribe all log groups of AWS services | `/aws/*` |
+| To subscribe log groups of only one service, such as Lambda | `/aws/lambda/*` |
+| To subscribe log groups of multiple services, such as Lambda, RDS, API Gateway | `/aws/(lambda\|rds\|apigateway)` |
+| To subscribe log groups by keyword, such as `Test` or `Prod` | `Test` or `Prod` [Case insensitive] |
+| To not subscribe any log group based on the pattern (filter by tags only) | `^$` |
+
+**LOG_GROUP_TAGS**: This is used to filter log groups based on tags. Only log groups that match any of the specified key-value pairs will be subscribed to the Lambda function. It is case-sensitive.
+For example:
+```bash
+LOG_GROUP_TAGS="Environment=Production,Application=MyApp"
```
+> 💡 **Tip**: To filter log groups based on tags only, set `LOG_GROUP_PATTERN=^$`.
+
+> **Note**: `LOG_GROUP_PATTERN` and `LOG_GROUP_TAGS` can be used together or separately to select which log groups are subscribed; a combined example is sketched below.
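+
+For instance, with the settings below a log group is subscribed when its name matches the pattern or it carries the matching tag (illustrative values only):
+
+```bash
+# Illustrative values only - subscribe Lambda log groups, plus anything tagged Environment=Production.
+LOG_GROUP_PATTERN="/aws/lambda/*"
+LOG_GROUP_TAGS="Environment=Production"
+```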
**DESTINATION_ARN**: This specifies ARN of the Destination to Subscribe the log group.
@@ -49,8 +71,6 @@ Lambda Destination ARN :- This specifies ARN of the Lambda function. Also you ha
Kinesis Destination ARN :- This specifies the ARN of the kinesis Stream.
-**LOG_GROUP_TAGS**: This is used for filtering out loggroups based on tags.Only loggroups which match any one of the key value pairs will be subscribed to the lambda function. This works only for new loggroups not existing loggroups.
-
**ROLE_ARN** : This is used when subscription destination ARN is kinesis firehose stream.
### For Developers
diff --git a/loggroup-lambda-connector/sam/packaged.yaml b/loggroup-lambda-connector/sam/packaged.yaml
index 3580450..0bda684 100644
--- a/loggroup-lambda-connector/sam/packaged.yaml
+++ b/loggroup-lambda-connector/sam/packaged.yaml
@@ -21,11 +21,11 @@ Metadata:
- serverless
- loggroups
- cloudwatch
- LicenseUrl: s3://appdevstore/LoggroupConnector/v1.0.5/6092dd6c323e33634657102f570628e0
+ LicenseUrl: s3://appdevstore/LoggroupConnector/v1.0.15/6092dd6c323e33634657102f570628e0
Name: sumologic-loggroup-connector
- ReadmeUrl: s3://appdevstore/LoggroupConnector/v1.0.5/5a9a6e956be7449cbd5f8653e4475071
- SemanticVersion: 1.0.5
- SourceCodeUrl: https://github.com/SumoLogic/sumologic-aws-lambda/loggroup-lambda-connector
+ ReadmeUrl: s3://appdevstore/LoggroupConnector/v1.0.15/15e6b49afe3116d3ff8861099b34b973
+ SemanticVersion: 1.0.15
+ SourceCodeUrl: https://github.com/SumoLogic/sumologic-aws-lambda/tree/main/loggroup-lambda-connector
SpdxLicenseId: Apache-2.0
Parameters:
DestinationArnType:
@@ -107,9 +107,9 @@ Resources:
SumoLogGroupLambdaConnector:
Type: AWS::Serverless::Function
Properties:
- CodeUri: s3://appdevstore/LoggroupConnector/v1.0.5/8cd9d2fd91333ef1c32307ccb5f6bbd3
+ CodeUri: s3://appdevstore/LoggroupConnector/v1.0.15/5a44aebff6ae18483b1b5d082d112e85
Handler: loggroup-lambda-connector.handler
- Runtime: nodejs12.x
+ Runtime: nodejs22.x
Environment:
Variables:
DESTINATION_ARN:
@@ -130,6 +130,7 @@ Resources:
- logs:DescribeLogGroups
- logs:DescribeLogStreams
- logs:PutSubscriptionFilter
+ - logs:ListTagsLogGroup
Resource:
- Fn::Sub: arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:*
- Sid: InvokePolicy
@@ -137,7 +138,7 @@ Resources:
Action:
- lambda:InvokeFunction
Resource:
- - Fn::Sub: arn:aws:lambda:${AWS::Region}:${AWS::AccountId}:function:*
+ - Fn::Sub: arn:aws:lambda:${AWS::Region}:${AWS::AccountId}:function:*SumoLogGroupLambda*
Events:
LambdaTrigger:
Type: CloudWatchEvent
@@ -150,6 +151,8 @@ Resources:
- logs.amazonaws.com
eventName:
- CreateLogGroup
+ Metadata:
+ SamResourceId: SumoLogGroupLambdaConnector
sumoIAMPassRolePolicy:
Type: AWS::IAM::Policy
Condition: create_pass_role
@@ -165,6 +168,8 @@ Resources:
Ref: RoleArn
Roles:
- Ref: SumoLogGroupLambdaConnectorRole
+ Metadata:
+ SamResourceId: sumoIAMPassRolePolicy
SumoCWLambdaInvokePermission:
Type: AWS::Lambda::Permission
Condition: create_invoke_permission
@@ -178,23 +183,26 @@ Resources:
Ref: AWS::AccountId
SourceArn:
Fn::Sub: arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:*:*
+ Metadata:
+ SamResourceId: SumoCWLambdaInvokePermission
SumoLogGroupExistingLambdaConnector:
Type: AWS::Serverless::Function
Condition: invoke_existing
Properties:
- InlineCode: "var aws = require('aws-sdk');\nvar response = require('cfn-response');\n\
- \nexports.handler = function(event, context) {\n var lambda = new aws.Lambda();\n\
- \ var payload = {\"existingLogs\": \"true\", \"token\": \"\"};\n var\
- \ responseStatus = \"FAILED\";\n var responseData = {};\n lambda.invoke(\n\
- \ {\n InvocationType: 'Event',\n FunctionName: process.env.FUNCTION_NAME,\n\
- \ Payload: JSON.stringify(payload),\n }, function(err, invokeResult)\
- \ {\n if (err) {\n responseData = {Error: \"Invoke call\
- \ failed\"};\n console.log(responseData.Error + \":\\n\", err);\n\
- \ }\n else {\n responseStatus = \"SUCCESS\";\n\
- \ }\n response.send(event, context, responseStatus, responseData);\n\
- \ });\n};\n"
+ InlineCode: "const { LambdaClient, InvokeCommand } = require(\"@aws-sdk/client-lambda\"\
+ );\nvar response = require('cfn-response');\n\nconst lambda = new LambdaClient({\
+ \ apiVersion: '2015-03-31' });\n\nexports.handler = async function (event,\
+ \ context) {\n const payload = { \"existingLogs\": \"true\", \"token\": \"\
+ \" };\n const responseData = {};\n let responseStatus = \"FAILED\";\n\n\
+ \ try {\n const invokeInput = new InvokeCommand({\n InvocationType:\
+ \ 'Event',\n FunctionName: process.env.FUNCTION_NAME,\n Payload:\
+ \ JSON.stringify(payload), // Convert to DynamoDB Marshaler format\n \
+ \ });\n await lambda.send(invokeInput);\n\n responseStatus = \"SUCCESS\"\
+ ;\n } catch (err) {\n responseData.Error = \"Invoke call failed\";\n \
+ \ console.log(responseData.Error + \":\\n\", err);\n }\n \n await response.send(event,\
+ \ context, responseStatus, responseData);\n};\n"
Handler: index.handler
- Runtime: nodejs12.x
+ Runtime: nodejs22.x
Environment:
Variables:
FUNCTION_NAME:
@@ -207,6 +215,8 @@ Resources:
- lambda:InvokeFunction
Resource:
- Fn::Sub: arn:aws:lambda:${AWS::Region}:${AWS::AccountId}:function:${SumoLogGroupLambdaConnector}
+ Metadata:
+ SamResourceId: SumoLogGroupExistingLambdaConnector
InvokeLambdaConnector:
Type: AWS::CloudFormation::CustomResource
Version: '1.0'
@@ -224,6 +234,8 @@ Resources:
Ref: LogGroupPattern
ROLE_ARN:
Ref: RoleArn
+ Metadata:
+ SamResourceId: InvokeLambdaConnector
Outputs:
SumoLogGroupLambdaConnector:
Description: SumoLogGroupLambdaConnector Function ARN
diff --git a/loggroup-lambda-connector/sam/sam_package.sh b/loggroup-lambda-connector/sam/sam_package.sh
old mode 100644
new mode 100755
index 39a078d..465d9d3
--- a/loggroup-lambda-connector/sam/sam_package.sh
+++ b/loggroup-lambda-connector/sam/sam_package.sh
@@ -1,5 +1,6 @@
#!/bin/bash
+export AWS_PROFILE="prod"
if [ "$AWS_PROFILE" == "prod" ]
then
SAM_S3_BUCKET="appdevstore"
@@ -9,9 +10,9 @@ else
AWS_REGION="us-east-2"
fi
-version="1.0.4"
+version="1.0.15"
-sam package --template-file template.yaml --s3-bucket $SAM_S3_BUCKET --output-template-file packaged.yaml --s3-prefix "LoggroupConnector/v$version"
+sam package --template-file template.yaml --s3-bucket $SAM_S3_BUCKET --output-template-file packaged.yaml --s3-prefix "LoggroupConnector/v$version" --region $AWS_REGION
# sam deploy --template-file packaged.yaml --stack-name testingloggrpconnector --capabilities CAPABILITY_IAM --region $AWS_REGION --parameter-overrides LambdaARN="arn:aws:lambda:us-east-1:956882708938:function:SumoCWLogsLambda" LogGroupTags="env=prod,name=apiassembly" LogGroupPattern="test"
diff --git a/loggroup-lambda-connector/sam/template.yaml b/loggroup-lambda-connector/sam/template.yaml
index bc0ee18..30a3fd8 100644
--- a/loggroup-lambda-connector/sam/template.yaml
+++ b/loggroup-lambda-connector/sam/template.yaml
@@ -23,9 +23,9 @@ Metadata:
- cloudwatch
LicenseUrl: ../LICENSE
Name: sumologic-loggroup-connector
- ReadmeUrl: ../README.md
- SemanticVersion: 1.0.5
- SourceCodeUrl: https://github.com/SumoLogic/sumologic-aws-lambda/loggroup-lambda-connector
+ ReadmeUrl: ../Readme.md
+ SemanticVersion: 1.0.15
+ SourceCodeUrl: https://github.com/SumoLogic/sumologic-aws-lambda/tree/main/loggroup-lambda-connector
SpdxLicenseId: Apache-2.0
Parameters:
@@ -99,7 +99,7 @@ Resources:
Properties:
CodeUri: ../src/
Handler: "loggroup-lambda-connector.handler"
- Runtime: nodejs12.x
+ Runtime: nodejs22.x
Environment:
Variables:
DESTINATION_ARN: !Ref "DestinationArnValue"
@@ -114,6 +114,7 @@ Resources:
- logs:DescribeLogGroups
- logs:DescribeLogStreams
- logs:PutSubscriptionFilter
+ - logs:ListTagsLogGroup
Resource:
- !Sub 'arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:*'
- Sid: InvokePolicy
@@ -121,7 +122,7 @@ Resources:
Action:
- lambda:InvokeFunction
Resource:
- - !Sub 'arn:aws:lambda:${AWS::Region}:${AWS::AccountId}:function:*'
+ - !Sub 'arn:aws:lambda:${AWS::Region}:${AWS::AccountId}:function:*SumoLogGroupLambda*'
Events:
LambdaTrigger:
Type: CloudWatchEvent
@@ -165,32 +166,34 @@ Resources:
Condition: invoke_existing
Properties:
InlineCode: |
- var aws = require('aws-sdk');
+ const { LambdaClient, InvokeCommand } = require("@aws-sdk/client-lambda");
var response = require('cfn-response');
- exports.handler = function(event, context) {
- var lambda = new aws.Lambda();
- var payload = {"existingLogs": "true", "token": ""};
- var responseStatus = "FAILED";
- var responseData = {};
- lambda.invoke(
- {
- InvocationType: 'Event',
- FunctionName: process.env.FUNCTION_NAME,
- Payload: JSON.stringify(payload),
- }, function(err, invokeResult) {
- if (err) {
- responseData = {Error: "Invoke call failed"};
- console.log(responseData.Error + ":\n", err);
- }
- else {
- responseStatus = "SUCCESS";
- }
- response.send(event, context, responseStatus, responseData);
- });
+ const lambda = new LambdaClient({ apiVersion: '2015-03-31' });
+
+ exports.handler = async function (event, context) {
+ const payload = { "existingLogs": "true", "token": "" };
+ const responseData = {};
+ let responseStatus = "FAILED";
+
+ try {
+ const invokeInput = new InvokeCommand({
+ InvocationType: 'Event',
+ FunctionName: process.env.FUNCTION_NAME,
+          Payload: JSON.stringify(payload), // Payload must be provided as a JSON string
+ });
+ await lambda.send(invokeInput);
+
+ responseStatus = "SUCCESS";
+ } catch (err) {
+ responseData.Error = "Invoke call failed";
+ console.log(responseData.Error + ":\n", err);
+ }
+
+ await response.send(event, context, responseStatus, responseData);
};
Handler: "index.handler"
- Runtime: nodejs12.x
+ Runtime: nodejs22.x
Environment:
Variables:
FUNCTION_NAME: !Ref SumoLogGroupLambdaConnector
diff --git a/loggroup-lambda-connector/src/loggroup-lambda-connector.js b/loggroup-lambda-connector/src/loggroup-lambda-connector.js
index 6fe170a..d5020fe 100644
--- a/loggroup-lambda-connector/src/loggroup-lambda-connector.js
+++ b/loggroup-lambda-connector/src/loggroup-lambda-connector.js
@@ -1,18 +1,93 @@
-var AWS = require("aws-sdk");
-const util = require("util");
-var cwl = new AWS.CloudWatchLogs({apiVersion: '2014-03-28'});
-var maxRetryCounter = 3;
-
-async function createSubscriptionFilter(lambdaLogGroupName, destinationArn, roleArn) {
- if (destinationArn.startsWith("arn:aws:lambda")){
- var params = {
+const { CloudWatchLogsClient, PutSubscriptionFilterCommand, DescribeLogGroupsCommand, ListTagsLogGroupCommand } = require("@aws-sdk/client-cloudwatch-logs");
+const { LambdaClient, InvokeCommand } = require("@aws-sdk/client-lambda");
+
+const cwl = new CloudWatchLogsClient();
+const lambda = new LambdaClient({ apiVersion: '2015-03-31' }); // Update to the appropriate Lambda API version you require
+const maxRetryCounter = 3;
+const timeoutThreshold = 12000;
+
+
+function validateRegex(pattern) {
+ try {
+ // Attempt to create a RegExp object with the provided pattern
+ return new RegExp(pattern, "i");
+ } catch (e) {
+ // Throw an error with a descriptive message if the pattern is invalid
+ throw new Error(`Invalid regular expression pattern: ${pattern}. Error: ${e.message}`);
+ }
+}
+
+async function getTagsByLogGroupName(logGroupName, retryCounter=0) {
+ var tags = {};
+ const input = {
+ logGroupName: logGroupName, // required
+ };
+ try {
+ // ListTagsLogGroupRequest
+ let response = await cwl.send(new ListTagsLogGroupCommand(input));
+ tags = response.tags
+ } catch (err) {
+ if (err && err.message === "Rate exceeded" && retryCounter <= maxRetryCounter) {
+ retryCounter += 1
+ const delayTime = Math.pow(2, retryCounter) * 2000; // Exponential backoff
+ console.log(`ThrottlingException encountered for ${logGroupName}. Retrying in ${delayTime}ms...Attempt ${retryCounter}/${maxRetryCounter}`);
+ await delay(delayTime);
+      tags = await getTagsByLogGroupName(logGroupName, retryCounter); // keep the result of the retried call
+ } else {
+ console.error(`Failed to get tags for ${logGroupName} due to ${err}`)
+ }
+ }
+ return tags
+}
+
+function IsTagMatchToLogGroup(tagMatcherForLogGroup, logGroupTags) {
+ if (tagMatcherForLogGroup && logGroupTags) {
+ let tagMatcherList = tagMatcherForLogGroup.split(",");
+ console.log("logGroupTags: ", logGroupTags);
+ let tag, key, value;
+ for (let i = 0; i < tagMatcherList.length; i++) {
+ tag = tagMatcherList[i].split("=");
+ key = tag[0].trim();
+ value = tag[1].trim();
+ if (logGroupTags[key] && logGroupTags[key] == value) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+async function filterExistingLogGroups(logGroupName, logGroupRegex) {
+ if (logGroupName.match(logGroupRegex)) {
+ return true;
+ }
+ var logGroupTags = await getTagsByLogGroupName(logGroupName)
+ var tagMatcherForLogGroup = process.env.LOG_GROUP_TAGS
+ console.log("Filtering log group:", logGroupName, "with tags:", logGroupTags);
+ return IsTagMatchToLogGroup(tagMatcherForLogGroup, logGroupTags)
+}
+
+function filterNewLogGroups(event, logGroupRegex) {
+ var logGroupName = event.detail.requestParameters.logGroupName;
+ if (logGroupName.match(logGroupRegex) && event.detail.eventName === "CreateLogGroup") {
+ return true;
+ }
+ var logGroupTags = event.detail.requestParameters.tags;
+ var tagMatcherForLogGroup = process.env.LOG_GROUP_TAGS
+ return IsTagMatchToLogGroup(tagMatcherForLogGroup, logGroupTags)
+}
+
+async function createSubscriptionFilter(lambdaLogGroupName, destinationArn, roleArn, additionalArgs) {
+ var params={};
+ if (destinationArn.startsWith("arn:aws:lambda")) {
+ params = {
destinationArn: destinationArn,
filterName: 'SumoLGLBDFilter',
filterPattern: '',
logGroupName: lambdaLogGroupName
};
} else {
- var params = {
+ params = {
destinationArn: destinationArn,
filterName: 'SumoLGLBDFilter',
filterPattern: '',
@@ -21,9 +96,11 @@ async function createSubscriptionFilter(lambdaLogGroupName, destinationArn, role
};
}
- // handle case where subscription filter exists/case where loggroup generated by target lambda
+ // handle the case where the subscription filter exists / case where the log group is generated by the target lambda
try {
- await util.promisify(cwl.putSubscriptionFilter.bind(cwl))(params);
+ const cmd = new PutSubscriptionFilterCommand(params);
+ await cwl.send(cmd);
+ additionalArgs.subscribeCount += 1
console.log("Successfully subscribed logGroup: ", lambdaLogGroupName);
} catch (err) {
console.log("Error in subscribing", lambdaLogGroupName, err);
@@ -31,128 +108,139 @@ async function createSubscriptionFilter(lambdaLogGroupName, destinationArn, role
}
}
-function filterLogGroups(event, logGroupRegex) {
- logGroupRegex = new RegExp(logGroupRegex, "i");
- let logGroupName = event.detail.requestParameters.logGroupName;
- if (logGroupName.match(logGroupRegex) && event.detail.eventName === "CreateLogGroup") {
- return true;
- }
- let lg_tags = event.detail.requestParameters.tags;
- if (process.env.LOG_GROUP_TAGS && lg_tags) {
- console.log("tags in loggroup: ", lg_tags);
- var tags_array = process.env.LOG_GROUP_TAGS.split(",");
- let tag, key, value;
- for (let i = 0; i < tags_array.length; i++) {
- tag = tags_array[i].split("=");
- key = tag[0].trim();
- value = tag[1].trim();
- if (lg_tags[key] && lg_tags[key]==value) {
- return true;
- }
- }
- }
- return false;
-}
-
-async function subscribeExistingLogGroups(logGroups, retryCounter) {
- var logGroupRegex = new RegExp(process.env.LOG_GROUP_PATTERN, "i");
+async function subscribeExistingLogGroups(logGroups, retryCounter, additionalArgs) {
+ var logGroupRegex = validateRegex(process.env.LOG_GROUP_PATTERN);
+ console.log("logGroupRegexPattern: ", logGroupRegex);
var destinationArn = process.env.DESTINATION_ARN;
var roleArn = process.env.ROLE_ARN;
const failedLogGroupNames = [];
await logGroups.reduce(async (previousPromise, nextLogGroup) => {
await previousPromise;
const { logGroupName } = nextLogGroup;
- if (!logGroupName.match(logGroupRegex)) {
- console.log("Unmatched logGroup: ", logGroupName);
- return Promise.resolve();
- } else {
- return createSubscriptionFilter(logGroupName, destinationArn, roleArn).catch( function (err) {
- if (err && err.code == "ThrottlingException") {
- failedLogGroupNames.push({logGroupName: logGroupName});
+ let filterStatus = await filterExistingLogGroups(logGroupName, logGroupRegex);
+ if (filterStatus) {
+ return createSubscriptionFilter(logGroupName, destinationArn, roleArn, additionalArgs).catch(function (err) {
+ if (err && err.message === "Rate exceeded") {
+ failedLogGroupNames.push({ logGroupName: logGroupName });
}
});
+ } else {
+ console.log("Unmatched logGroup: ", logGroupName);
+ return Promise.resolve();
}
}, Promise.resolve());
if (retryCounter <= maxRetryCounter && failedLogGroupNames.length > 0) {
console.log("Retrying Subscription for Failed Log Groups due to throttling with counter number as " + retryCounter);
- await subscribeExistingLogGroups(failedLogGroupNames, retryCounter + 1);
+ await subscribeExistingLogGroups(failedLogGroupNames, retryCounter + 1, additionalArgs);
}
}
-function processExistingLogGroups(token, context, errorHandler) {
- var params = {limit: 50};
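+// Pages through DescribeLogGroups (50 at a time) and subscribes each matching group; when the
+// remaining execution time gets close to the timeout threshold it re-invokes this function with
+// the pagination token instead of recursing, so processing continues in a fresh invocation.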
+async function processExistingLogGroups(context, token, additionalArgs, errorHandler) {
+ var params = { limit: 50 };
if (token) {
- params = {
- limit: 50,
- // logGroupNamePrefix: '',
- nextToken: token
- };
+ params = {
+ limit: 50,
+ nextToken: token
+ };
}
- var p = new Promise(function(resolve, reject) {
- cwl.describeLogGroups(params, function(err, data) {
- if (err) {
- console.log("error in fetching logGroups", err, err.stack);
- reject(err);
- } else {
- console.log("fetched logGroups: " + data.logGroups.length + " nextToken: " + data.nextToken);
- resolve(data);
- }
- });
- });
- var cb = async function (data) {
- await subscribeExistingLogGroups(data.logGroups, 1);
- if (data.nextToken) {// if next set of log groups exists, invoke next instance of lambda
- console.log("Log Groups remaining...Calling the lambda again with token " + data.nextToken);
- invoke_lambda(context, data.nextToken, errorHandler);
- console.log("Lambda invoke complete with token " + data.nextToken);
- } else {
- console.log("All Log Groups are subscribed to Destination Type " + process.env.DESTINATION_ARN);
- errorHandler(null, "Success");
+
+ try {
+ console.log("Previous record count " + additionalArgs.recordCount);
+ const data = await cwl.send(new DescribeLogGroupsCommand(params));
+ additionalArgs.recordCount += data.logGroups.length;
+ console.log("Updated record count " + additionalArgs.recordCount);
+ await subscribeExistingLogGroups(data.logGroups, 1, additionalArgs);
+ console.log("Updated subscribeCount " + additionalArgs.subscribeCount);
+ if (data.nextToken) {
+ const remainingTime = context.getRemainingTimeInMillis();
+ const diffTime = remainingTime - timeoutThreshold; // headroom left after reserving the timeout buffer
+ if (diffTime < timeoutThreshold) {
+ additionalArgs.invokeCount += 1;
+ console.log("Not enough time remaining; re-invoking the Lambda with token " + data.nextToken);
+ console.log("InvokeCount " + additionalArgs.invokeCount);
+ await invoke_lambda(context, data.nextToken, additionalArgs, errorHandler);
+ return;
}
- };
- return p.then(cb).catch(function (err) {
- errorHandler(err, "Error in fetching logGroups");
- });
-}
+ console.log("Remaining time " + remainingTime);
+ console.log("Log Groups remaining...Calling the lambda again with token " + data.nextToken);
+ await processExistingLogGroups(context, data.nextToken, additionalArgs, errorHandler)
+ } else {
+ console.log("Total " + additionalArgs.subscribeCount + " out of " + additionalArgs.recordCount
+ + " Log Groups are subscribed to Destination Type "
+ + process.env.DESTINATION_ARN);
+ console.log("Last invokeCount " + additionalArgs.invokeCount);
+ errorHandler(null, "Success");
+ }
+ } catch (err) {
+ errorHandler(err, "Error in fetching logGroups");
+ }
+ }
-function invoke_lambda(context, token, errorHandler) {
- var lambda = new AWS.Lambda();
- var payload = {"existingLogs": "true", "token": token};
- lambda.invoke({
- InvocationType: 'Event',
- FunctionName: context.functionName,
- Payload: JSON.stringify(payload),
- }, errorHandler);
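+// Asynchronously re-invokes this same function (Event invocation type), passing the pagination
+// token and the running counters so the next invocation can resume where this one stopped.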
+async function invoke_lambda(context, token, additionalArgs, errorHandler) {
+ var payload = {"existingLogs": "true", "token": token, "additionalArgs": additionalArgs};
+ try {
+ await lambda.send(new InvokeCommand({
+ InvocationType: 'Event',
+ FunctionName: context.functionName,
+ Payload: JSON.stringify(payload)
+ }));
+ } catch (err) {
+ errorHandler(err, "Error invoking Lambda");
+ }
}
-function processEvents(env, event, errorHandler) {
-
- var logGroupName = event.detail.requestParameters.logGroupName;
- if (filterLogGroups(event, env.LOG_GROUP_PATTERN)) {
- console.log("Subscribing: ", logGroupName, env.DESTINATION_ARN);
- createSubscriptionFilter(logGroupName, env.DESTINATION_ARN, env.ROLE_ARN).catch (function (err) {
- errorHandler(err, "Error in Subscribing.");
- });
- } else {
- console.log("Unmatched: ", logGroupName, env.DESTINATION_ARN);
- }
+async function delay(ms) {
+ return new Promise(resolve => setTimeout(resolve, ms));
+}
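+// Handles a single CreateLogGroup event: subscribe the new log group if it matches, retrying with
+// exponential backoff when CloudWatch Logs throttles the PutSubscriptionFilter call.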
+async function processEvents(env, event, additionalArgs, errorHandler, retryCounter=0) {
+ var logGroupName = event.detail.requestParameters.logGroupName;
+ var logGroupRegex = validateRegex(env.LOG_GROUP_PATTERN);
+ console.log("logGroupRegex: ", logGroupRegex);
+ if (filterNewLogGroups(event, logGroupRegex)) {
+ console.log("Subscribing: ", logGroupName, env.DESTINATION_ARN);
+ try {
+ await createSubscriptionFilter(logGroupName, env.DESTINATION_ARN, env.ROLE_ARN, additionalArgs);
+ } catch (err) {
+ if (err && err.message === "Rate exceeded" && retryCounter <= maxRetryCounter) {
+ retryCounter += 1;
+ const delayTime = Math.pow(2, retryCounter) * 1000; // exponential backoff
+ console.log(`ThrottlingException encountered. Retrying in ${delayTime}ms...Attempt ${retryCounter}/${maxRetryCounter}`);
+ await delay(delayTime);
+ await processEvents(env, event, additionalArgs, errorHandler, retryCounter);
+ } else {
+ // report the failure only when the error is not retryable or retries are exhausted
+ errorHandler(err, "Error in Subscribing.");
+ }
+ }
+ } else {
+ console.log("Unmatched: ", logGroupName, env.DESTINATION_ARN);
+ }
}
-exports.handler = function (event, context, callback) {
- console.log("Invoking Log Group connector function");
- function errorHandler(err, msg) {
- if (err) {
- console.log(err, msg);
- callback(err);
- } else {
- callback(null, "Success");
- }
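+// Entry point: the counters in additionalArgs travel between chained invocations via the event payload.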
+exports.handler = async function (event, context, callback) {
+ let additionalArgs = {
+ recordCount: 0,
+ subscribeCount: 0,
+ invokeCount: 0
+ };
+ if (event.additionalArgs) {
+ additionalArgs = event.additionalArgs
+ }
+ console.log("Invoking Log Group connector function");
+ function errorHandler(err, msg) {
+ if (err) {
+ console.log(err, msg);
+ callback(err);
+ } else {
+ callback(null, "Success");
+ }
+ }
+ if (!process.env.LOG_GROUP_PATTERN || process.env.LOG_GROUP_PATTERN.trim().length === 0) {
+ console.warn("LOG_GROUP_PATTERN is empty, it will subscribe to all loggroups");
}
- if (event.existingLogs == "true") {
- processExistingLogGroups(event.token, context, errorHandler);
+ if (event.existingLogs == "true") {
+ await processExistingLogGroups(context, event.token, additionalArgs, errorHandler);
} else {
- processEvents(process.env, event, errorHandler);
+ await processEvents(process.env, event, additionalArgs, errorHandler);
}
-};
+};
\ No newline at end of file
diff --git a/loggroup-lambda-connector/test/requirements.txt b/loggroup-lambda-connector/test/requirements.txt
index 6e204ce..f5ffcb8 100644
--- a/loggroup-lambda-connector/test/requirements.txt
+++ b/loggroup-lambda-connector/test/requirements.txt
@@ -1,3 +1,3 @@
-requests==2.20.0
-boto3==1.5.1
-cfn-flip>=1.2.3
\ No newline at end of file
+requests>=2.32.4
+boto3==1.36.11
+cfn-flip>=1.3.0
diff --git a/loggroup-lambda-connector/test/test-template.yaml b/loggroup-lambda-connector/test/test-template.yaml
index 02f35b2..a875235 100644
--- a/loggroup-lambda-connector/test/test-template.yaml
+++ b/loggroup-lambda-connector/test/test-template.yaml
@@ -23,6 +23,13 @@ Parameters:
AllowedValues: [ "true", "false" ]
Description: "Select true for subscribing existing logs"
+ LogGroupTags:
+ Type: String
+ Default: ""
+ Description: Enter comma-separated key=value pairs for filtering log groups using
+ tags, for example KeyName1=string,KeyName2=string. This parameter is optional;
+ leave it blank if tag-based filtering is not needed.
+
BucketName:
Type: String
Default: ""
@@ -72,7 +79,7 @@ Resources:
print("success")
Handler: index.lambda_handler
MemorySize: 128
- Runtime: python3.7
+ Runtime: python3.12
Timeout: 60
Role: !GetAtt LambdaRole.Arn
@@ -214,6 +221,7 @@ Resources:
DestinationArnValue: !If [ create_invoke_permission, !GetAtt DummyLambda.Arn, !GetAtt KinesisLogsDeliveryStream.Arn ]
LogGroupPattern: !Ref LogGroupPattern
UseExistingLogs: !Ref UseExistingLogs
+ LogGroupTags: !Ref LogGroupTags
RoleArn: !If [ create_invoke_permission, "", !GetAtt KinesisLogsRole.Arn ]
Outputs:
diff --git a/loggroup-lambda-connector/test/test_loggroup_lambda_connector.py b/loggroup-lambda-connector/test/test_loggroup_lambda_connector.py
index c0df884..f650fff 100644
--- a/loggroup-lambda-connector/test/test_loggroup_lambda_connector.py
+++ b/loggroup-lambda-connector/test/test_loggroup_lambda_connector.py
@@ -72,7 +72,26 @@ def test_4_existing_kinesis(self):
#self.invoke_lambda()
self.assert_subscription_filter("SumoLGLBDFilter")
- def create_stack_parameters(self, destination, existing, pattern='test'):
+ def test_5_matching_existing_loggroup_with_pattern_and_tag(self):
+ self.create_log_group_with_tag()
+ self.create_stack(self.stack_name, self.template_data,
+ self.create_stack_parameters("Kinesis","true", loggroup_tag='env=prod'))
+ print("Testing Stack Creation")
+ self.assertTrue(self.stack_exists(self.stack_name))
+ #self.invoke_lambda()
+ self.assert_subscription_filter("SumoLGLBDFilter")
+
+ def test_6_matching_existing_loggroup_by_tag_only(self):
+ self.create_log_group_with_tag()
+ self.create_stack(self.stack_name, self.template_data,
+ self.create_stack_parameters("Kinesis","true", loggroup_pattern='^$',
+ loggroup_tag='team=apps'))
+ print("Testing Stack Creation")
+ self.assertTrue(self.stack_exists(self.stack_name))
+ #self.invoke_lambda()
+ self.assert_subscription_filter("SumoLGLBDFilter")
+
+ def create_stack_parameters(self, destination, existing, loggroup_pattern='test', loggroup_tag=''):
return [
{
'ParameterKey': 'DestinationType',
@@ -80,7 +99,11 @@ def create_stack_parameters(self, destination, existing, pattern='test'):
},
{
'ParameterKey': 'LogGroupPattern',
- 'ParameterValue': pattern
+ 'ParameterValue': loggroup_pattern
+ },
+ {
+ 'ParameterKey': 'LogGroupTags',
+ 'ParameterValue': loggroup_tag
},
{
'ParameterKey': 'UseExistingLogs',
@@ -137,6 +160,16 @@ def create_log_group(self):
response = self.log_group_client.create_log_group(logGroupName=self.log_group_name)
print("creating log group", response)
+ def create_log_group_with_tag(self):
+ tags = {
+ 'team': 'apps',
+ 'env': 'prod'
+ }
+ self.log_group_name = 'mytag-%s' % (datetime.datetime.now().strftime("%d-%m-%y-%H-%M-%S"))
+ print("Loggroup Name", self.log_group_name)
+ response = self.log_group_client.create_log_group(logGroupName=self.log_group_name, tags=tags)
+ print("creating log group", response)
+
def assert_subscription_filter(self, filter_name):
sleep(60)
response = self.log_group_client.describe_subscription_filters(
@@ -188,7 +221,7 @@ def upload_to_s3(file_path):
bucket_name = get_bucket_name()
key = os.path.basename(file_path)
filename = os.path.join(os.path.dirname(os.path.abspath(__file__)), file_path)
- s3.upload_file(os.path.join(__file__, filename), bucket_name, key, ExtraArgs={'ACL': 'public-read'})
+ s3.upload_file(os.path.join(__file__, filename), bucket_name, key)
def create_sam_package_and_upload():
@@ -205,7 +238,8 @@ def create_sam_package_and_upload():
def _run(command, input=None, check=False, **kwargs):
if sys.version_info >= (3, 5):
- return subprocess.run(command, capture_output=True)
+ result = subprocess.run(command, capture_output=True)
+ return result.returncode, result.stdout, result.stderr
if input is not None:
if 'stdin' in kwargs:
raise ValueError('stdin and input arguments may not both be used.')
@@ -226,11 +260,11 @@ def _run(command, input=None, check=False, **kwargs):
def run_command(cmdargs):
- resp = _run(cmdargs)
- if len(resp.stderr.decode()) > 0:
+ retcode, stdout, stderr = _run(cmdargs)
+ if retcode != 0:
# traceback.print_exc()
- raise Exception("Error in run command %s cmd: %s" % (resp, cmdargs))
- return resp.stdout
+ raise Exception("Error in run command %s cmd: %s" % (stderr, cmdargs))
+ return retcode, stdout, stderr
if __name__ == '__main__':
diff --git a/securityhub-collector/LICENSE b/securityhub-collector/LICENSE
new file mode 100644
index 0000000..ba07b59
--- /dev/null
+++ b/securityhub-collector/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2021 Sumo Logic Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/securityhub-collector/sam/create_layer.sh b/securityhub-collector/sam/create_layer.sh
deleted file mode 100644
index 99ea7a9..0000000
--- a/securityhub-collector/sam/create_layer.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-#!bash/bin
-
-if [ ! -f securityhub_deps.zip ]; then
- echo "creating zip file"
- mkdir python
- cd python
- pip install -r ../requirements.txt -t ./
- zip -r ../securityhub_deps.zip .
- cd ..
-fi
-
-declare -a regions=("us-east-2" "us-east-1" "us-west-1" "us-west-2" "ap-south-1" "ap-northeast-2" "ap-southeast-1" "ap-southeast-2" "ap-northeast-1" "ca-central-1" "eu-central-1" "eu-west-1" "eu-west-2" "eu-west-3" "sa-east-1")
-
-for i in "${regions[@]}"
-do
- echo "Deploying layer in $i"
- bucket_name="appdevzipfiles-$i"
- aws s3 cp securityhub_deps.zip s3://$bucket_name/ --region $i
-
- aws lambda publish-layer-version --layer-name securityhub_deps --description "contains securityhub solution dependencies" --license-info "MIT" --content S3Bucket=$bucket_name,S3Key=securityhub_deps.zip --compatible-runtimes python3.7 python3.6 --region $i
-
- aws lambda add-layer-version-permission --layer-name securityhub_deps --statement-id securityhub-deps --version-number 1 --principal '*' --action lambda:GetLayerVersion --region $i
-done
-
-# aws lambda remove-layer-version-permission --layer-name securityhub_deps --version-number 1 --statement-id securityhub-deps --region us-east-1
-# aws lambda get-layer-version-policy --layer-name securityhub_deps --region us-east-1
diff --git a/securityhub-collector/sam/packaged.yaml b/securityhub-collector/sam/packaged.yaml
index 98daed7..8f34a55 100755
--- a/securityhub-collector/sam/packaged.yaml
+++ b/securityhub-collector/sam/packaged.yaml
@@ -8,6 +8,25 @@ Description: 'This solution consists of a lambda function which which gets trigg
Globals:
Function:
Timeout: 300
+Metadata:
+ AWS::ServerlessRepo::Application:
+ Author: Sumo Logic
+ Description: This solution consists of a lambda function which gets triggered
+ by CloudWatch events with findings as payload which are then ingested to Sumo
+ Logic via S3 source.
+ HomePageUrl: https://github.com/SumoLogic/sumologic-aws-lambda
+ Labels:
+ - sumologic
+ - serverless
+ - security
+ - cloudwatchevents
+ - securityhub
+ Name: sumologic-securityhub-collector
+ LicenseUrl: s3://appdevstore/SecurityHubCollector/v1.0.10/6092dd6c323e33634657102f570628e0
+ ReadmeUrl: s3://appdevstore/SecurityHubCollector/v1.0.10/3edeb049c0e4202e9588e43b957090ed
+ SemanticVersion: 1.0.10
+ SourceCodeUrl: https://github.com/SumoLogic/sumologic-aws-lambda/tree/master/securityhub-collector
+ SpdxLicenseId: Apache-2.0
Parameters:
S3SourceBucketName:
Type: String
@@ -16,8 +35,8 @@ Resources:
Type: AWS::Serverless::Function
Properties:
Handler: securityhub_collector.lambda_handler
- Runtime: python3.7
- CodeUri: s3://appdevstore/3821fd9c5288ebaca71e4ea0b26629ab
+ Runtime: python3.13
+ CodeUri: s3://appdevstore/SecurityHubCollector/v1.0.10/3c2bc2da7576810682419519fdc578bb
MemorySize: 128
Timeout: 300
Policies:
@@ -40,6 +59,8 @@ Resources:
Pattern:
source:
- aws.securityhub
+ Metadata:
+ SamResourceId: SecurityHubCollectorFunction
Outputs:
SecurityHubCollectorFunction:
Description: SecurityHubCollector Function ARN
diff --git a/securityhub-collector/sam/requirements.txt b/securityhub-collector/sam/requirements.txt
index a5f98ce..ea9cc41 100644
--- a/securityhub-collector/sam/requirements.txt
+++ b/securityhub-collector/sam/requirements.txt
@@ -1 +1 @@
-boto3==1.9.60
+boto3==1.36.11
diff --git a/securityhub-collector/sam/sam_package.sh b/securityhub-collector/sam/sam_package.sh
index 72f12eb..1ebab25 100755
--- a/securityhub-collector/sam/sam_package.sh
+++ b/securityhub-collector/sam/sam_package.sh
@@ -1,16 +1,34 @@
#!/bin/bash
+export AWS_REGION="us-east-1"
+export AWS_PROFILE="prod"
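+# The exported profile drives the branch below: "prod" publishes via the appdevstore bucket in us-east-1.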
if [ "$AWS_PROFILE" == "prod" ]
then
SAM_S3_BUCKET="appdevstore"
AWS_REGION="us-east-1"
else
- SAM_S3_BUCKET="cf-templates-5d0x5unchag-us-east-2"
+ SAM_S3_BUCKET="appstore-20231108-securityhub-collector"
AWS_REGION="us-east-2"
fi
-sam package --template-file template.yaml --s3-bucket $SAM_S3_BUCKET --output-template-file packaged.yaml
-sam deploy --template-file packaged.yaml --stack-name testingsecurityhubcollector --capabilities CAPABILITY_IAM --region $AWS_REGION --parameter-overrides S3SourceBucketName=securityhubfindings
+version="1.0.10"
+echo "Creating package.yaml"
+sam package --template-file template.yaml --s3-bucket $SAM_S3_BUCKET --output-template-file packaged.yaml --s3-prefix "SecurityHubCollector/v"$version --region $AWS_REGION --profile $AWS_PROFILE
+
+if [ $? -ne 0 ]
+then
+ echo "Creating package command failed!"
+ exit 1
+else
+ echo "package.yaml created"
+fi
+
+echo "Publishing sumologic-securityhub-collector "$version
+sam publish --template packaged.yaml --region $AWS_REGION --semantic-version $version
+
+# sam deploy --template-file packaged.yaml --stack-name testingsechubcollector --capabilities CAPABILITY_IAM --region $AWS_REGION --parameter-overrides S3SourceBucketName=securityhubfindings
+
+echo "Published sumologic-securityhub-collector "$version
+
#aws cloudformation describe-stack-events --stack-name testingsecurityhublambda --region $AWS_REGION
#aws cloudformation get-template --stack-name testingsecurityhublambda --region $AWS_REGION
-# aws serverlessrepo create-application-version --region us-east-1 --application-id arn:aws:serverlessrepo:us-east-1:$AWS_ACCOUNT_ID:applications/sumologic-securityhub-connector --semantic-version 1.0.1 --template-body file://packaged.yaml
+# aws serverlessrepo create-application-version --region us-east-1 --application-id arn:aws:serverlessrepo:us-east-1:$AWS_ACCOUNT_ID:applications/sumologic-securityhub-collector --semantic-version 1.0.1 --template-body file://packaged.yaml
diff --git a/securityhub-collector/sam/template.yaml b/securityhub-collector/sam/template.yaml
index 036e660..16a2205 100755
--- a/securityhub-collector/sam/template.yaml
+++ b/securityhub-collector/sam/template.yaml
@@ -7,6 +7,24 @@ Globals:
Function:
Timeout: 300
+Metadata:
+ AWS::ServerlessRepo::Application:
+ Author: Sumo Logic
+ Description: This solution consists of a lambda function which gets triggered by CloudWatch events with findings as payload which are then ingested to Sumo Logic via S3 source.
+ HomePageUrl: https://github.com/SumoLogic/sumologic-aws-lambda
+ Labels:
+ - sumologic
+ - serverless
+ - security
+ - cloudwatchevents
+ - securityhub
+ Name: sumologic-securityhub-collector
+ LicenseUrl: ../LICENSE
+ ReadmeUrl: ../README.md
+ SemanticVersion: 1.0.10
+ SourceCodeUrl: https://github.com/SumoLogic/sumologic-aws-lambda/tree/master/securityhub-collector
+ SpdxLicenseId: Apache-2.0
+
Parameters:
S3SourceBucketName:
Type: String
@@ -17,7 +35,7 @@ Resources:
Type: 'AWS::Serverless::Function'
Properties:
Handler: securityhub_collector.lambda_handler
- Runtime: python3.7
+ Runtime: python3.13
CodeUri: ../src/
MemorySize: 128
Timeout: 300
diff --git a/securityhub-collector/src/securityhub_collector.py b/securityhub-collector/src/securityhub_collector.py
index cc7d447..bbcf62b 100644
--- a/securityhub-collector/src/securityhub_collector.py
+++ b/securityhub-collector/src/securityhub_collector.py
@@ -56,7 +56,7 @@ def lambda_handler(event, context):
if __name__ == '__main__':
- event = json.load(open('../sam/event.json'))
+ event = json.load(open('../test/event.json'))
BUCKET_NAME = "securityhubfindings"
class context:
diff --git a/securityhub-collector/sam/event.json b/securityhub-collector/test/event.json
similarity index 100%
rename from securityhub-collector/sam/event.json
rename to securityhub-collector/test/event.json
diff --git a/securityhub-forwarder/LICENSE b/securityhub-forwarder/LICENSE
new file mode 100644
index 0000000..ba07b59
--- /dev/null
+++ b/securityhub-forwarder/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2021 Sumo Logic Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/securityhub-forwarder/sam/packaged.yaml b/securityhub-forwarder/sam/packaged.yaml
index a27e919..515687a 100755
--- a/securityhub-forwarder/sam/packaged.yaml
+++ b/securityhub-forwarder/sam/packaged.yaml
@@ -1,28 +1,48 @@
AWSTemplateFormatVersion: '2010-09-09'
+Transform: AWS::Serverless-2016-10-31
Description: 'This function is invoked by Sumo Logic(via Scheduled Search) through
API Gateway. The event payload received is then forwarded to AWS Security Hub.
'
-Outputs:
- SecurityHubForwarderApiUrl:
- Description: URL of your API endpoint
- Value:
- Fn::Sub: https://${SecurityHubForwarderApiGateway}.execute-api.${AWS::Region}.amazonaws.com/prod/findings
- SecurityHubForwarderFunction:
- Description: SecurityHubForwarder Function ARN
- Value:
- Fn::GetAtt:
- - SecurityHubForwarderFunction
- - Arn
+Metadata:
+ AWS::ServerlessRepo::Application:
+ Author: Sumo Logic
+ Description: This lambda function is used for importing findings from Sumo Logic
+ to AWS Security Hub. The function is invoked by Sumo Logic (via Scheduled Search)
+ through API Gateway. The event payload received is then forwarded to AWS Security
+ Hub.
+ HomePageUrl: https://github.com/SumoLogic/sumologic-aws-lambda
+ Labels:
+ - sumologic
+ - serverless
+ - security
+ - cloudwatchevents
+ - securityhub
+ Name: sumologic-securityhub-forwarder
+ LicenseUrl: s3://appdevstore/SecurityHubForwarder/v1.0.11/6092dd6c323e33634657102f570628e0
+ ReadmeUrl: s3://appdevstore/SecurityHubForwarder/v1.0.11/56f774fe371f787c17ec137a47c93138
+ SemanticVersion: 1.0.11
+ SourceCodeUrl: https://github.com/SumoLogic/sumologic-aws-lambda/tree/master/securityhub-forwarder
+ SpdxLicenseId: Apache-2.0
Resources:
SecurityHubForwarderApiGateway:
+ Type: AWS::Serverless::Api
Properties:
+ StageName: prod
+ EndpointConfiguration: EDGE
DefinitionBody:
+ swagger: '2.0'
info:
- description: API endpoint for invoking SecurityHubForwarderFunction
title:
Ref: AWS::StackName
+ description: API endpoint for invoking SecurityHubForwarderFunction
version: 1.0.0
+ securityDefinitions:
+ sigv4:
+ type: apiKey
+ name: Authorization
+ in: header
+ x-amazon-apigateway-authtype: awsSigv4
paths:
/findings:
post:
@@ -34,46 +54,48 @@ Resources:
security:
- sigv4: []
x-amazon-apigateway-integration:
- httpMethod: POST
- passthroughBehavior: when_no_match
- requestParameters:
- integration.request.header.X-Amz-Invocation-Type: '''RequestResponse'''
type: aws_proxy
uri:
Fn::Sub: arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31/functions/${SecurityHubForwarderFunction.Arn}/invocations
- securityDefinitions:
- sigv4:
- in: header
- name: Authorization
- type: apiKey
- x-amazon-apigateway-authtype: awsSigv4
- swagger: '2.0'
- EndpointConfiguration: EDGE
- StageName: prod
- Type: AWS::Serverless::Api
+ passthroughBehavior: when_no_match
+ httpMethod: POST
+ requestParameters:
+ integration.request.header.X-Amz-Invocation-Type: '''RequestResponse'''
+ Metadata:
+ SamResourceId: SecurityHubForwarderApiGateway
SecurityHubForwarderFunction:
+ Type: AWS::Serverless::Function
Properties:
- CodeUri: s3://appdevstore/98ee274ed4543bd1e1344fec701211df
- Events:
- Api1:
- Properties:
- Method: POST
- Path: /findings
- RestApiId:
- Ref: SecurityHubForwarderApiGateway
- Type: Api
Handler: securityhub_forwarder.lambda_handler
- Layers:
- - Fn::Sub: arn:aws:lambda:${AWS::Region}:956882708938:layer:securityhub_deps:1
+ Runtime: python3.13
+ CodeUri: s3://appdevstore/SecurityHubForwarder/v1.0.11/3589bbeb5907b695780f650d5374a74d
MemorySize: 128
+ Timeout: 300
Policies:
- Statement:
- - Action:
- - securityhub:BatchImportFindings
+ - Sid: SecurityHubImportFindingsPolicy
Effect: Allow
+ Action:
+ - securityhub:BatchImportFindings
Resource: arn:aws:securityhub:*:*:*
- Sid: SecurityHubImportFindingsPolicy
- Runtime: python3.7
- Timeout: 300
- Type: AWS::Serverless::Function
-Transform: AWS::Serverless-2016-10-31
+ Events:
+ Api1:
+ Type: Api
+ Properties:
+ Path: /findings
+ Method: POST
+ RestApiId:
+ Ref: SecurityHubForwarderApiGateway
+ Metadata:
+ SamResourceId: SecurityHubForwarderFunction
+Outputs:
+ SecurityHubForwarderFunction:
+ Description: SecurityHubForwarder Function ARN
+ Value:
+ Fn::GetAtt:
+ - SecurityHubForwarderFunction
+ - Arn
+ SecurityHubForwarderApiUrl:
+ Description: URL of your API endpoint
+ Value:
+ Fn::Sub: https://${SecurityHubForwarderApiGateway}.execute-api.${AWS::Region}.amazonaws.com/prod/findings
diff --git a/securityhub-forwarder/sam/requirements.txt b/securityhub-forwarder/sam/requirements.txt
index e48ddb1..ea9cc41 100644
--- a/securityhub-forwarder/sam/requirements.txt
+++ b/securityhub-forwarder/sam/requirements.txt
@@ -1 +1 @@
-boto3==1.9.66
+boto3==1.36.11
diff --git a/securityhub-forwarder/sam/sam_package.sh b/securityhub-forwarder/sam/sam_package.sh
index c27f86a..397e2e0 100755
--- a/securityhub-forwarder/sam/sam_package.sh
+++ b/securityhub-forwarder/sam/sam_package.sh
@@ -1,5 +1,7 @@
#!/bin/bash
+export AWS_PROFILE="prod"
+
if [ "$AWS_PROFILE" == "prod" ]
then
SAM_S3_BUCKET="appdevstore"
@@ -8,10 +10,25 @@ else
SAM_S3_BUCKET="cf-templates-5d0x5unchag-us-east-2"
AWS_REGION="us-east-2"
fi
+version="1.0.11"
+
+echo "Creating package.yaml"
+sam package --template-file template.yaml --s3-bucket $SAM_S3_BUCKET --output-template-file packaged.yaml --s3-prefix "SecurityHubForwarder/v"$version --region $AWS_REGION --profile $AWS_PROFILE
+
+if [ $? -ne 0 ]
+then
+ echo "Creating package command failed!"
+ exit 1
+else
+ echo "package.yaml created"
+fi
+
+echo "Publishing sumologic-securityhub-forwarder "$version
+sam publish --template packaged.yaml --region $AWS_REGION --semantic-version $version
-sam package --template-file template.yaml --s3-bucket $SAM_S3_BUCKET --output-template-file packaged.yaml
+echo "Published sumologic-securityhub-forwarder "$version
-sam deploy --template-file packaged.yaml --stack-name testingsecurityhubforwarder --capabilities CAPABILITY_IAM --region $AWS_REGION
+# sam deploy --template-file packaged.yaml --stack-name testingsechubforwarder --capabilities CAPABILITY_IAM --region $AWS_REGION
#aws cloudformation describe-stack-events --stack-name testingsecurityhublambda --region $AWS_REGION
#aws cloudformation get-template --stack-name testingsecurityhublambda --region $AWS_REGION
# aws serverlessrepo create-application-version --region us-east-1 --application-id arn:aws:serverlessrepo:us-east-1:$AWS_ACCOUNT_ID:applications/sumologic-securityhub-forwarder --semantic-version 1.0.1 --template-body file://packaged.yaml
diff --git a/securityhub-forwarder/sam/template.yaml b/securityhub-forwarder/sam/template.yaml
index 8a30d72..3fee78e 100755
--- a/securityhub-forwarder/sam/template.yaml
+++ b/securityhub-forwarder/sam/template.yaml
@@ -2,6 +2,23 @@ AWSTemplateFormatVersion: '2010-09-09'
Transform: 'AWS::Serverless-2016-10-31'
Description: >
This function is invoked by Sumo Logic(via Scheduled Search) through API Gateway. The event payload received is then forwarded to AWS Security Hub.
+Metadata:
+ AWS::ServerlessRepo::Application:
+ Author: Sumo Logic
+ Description: This lambda function is used for importing findings from Sumo Logic to AWS Security Hub. The function is invoked by Sumo Logic (via Scheduled Search) through API Gateway. The event payload received is then forwarded to AWS Security Hub.
+ HomePageUrl: https://github.com/SumoLogic/sumologic-aws-lambda
+ Labels:
+ - sumologic
+ - serverless
+ - security
+ - cloudwatchevents
+ - securityhub
+ Name: sumologic-securityhub-forwarder
+ LicenseUrl: ../LICENSE
+ ReadmeUrl: ../README.md
+ SemanticVersion: 1.0.11
+ SourceCodeUrl: https://github.com/SumoLogic/sumologic-aws-lambda/tree/master/securityhub-forwarder
+ SpdxLicenseId: Apache-2.0
Resources:
SecurityHubForwarderApiGateway:
@@ -45,9 +62,7 @@ Resources:
Type: 'AWS::Serverless::Function'
Properties:
Handler: securityhub_forwarder.lambda_handler
- Runtime: python3.7
- Layers:
- - !Sub 'arn:aws:lambda:${AWS::Region}:956882708938:layer:securityhub_deps:1'
+ Runtime: python3.13
CodeUri: ../src/
MemorySize: 128
Timeout: 300
diff --git a/securityhub-forwarder/test/testevent.json b/securityhub-forwarder/test/testevent.json
new file mode 100644
index 0000000..889ef18
--- /dev/null
+++ b/securityhub-forwarder/test/testevent.json
@@ -0,0 +1,3 @@
+{
+ "body":"{\"Types\": \"Software and Configuration Checks\/Industry and Regulatory Standards\/HIPAA Controls\", \"Description\": \"This search gives top 10 resources which are accessed in last 15 minutes\", \"GeneratorID\": \"InsertFindingsScheduledSearch\", \"Severity\": 30, \"SourceUrl\": \"https:\/\/service.sumologic.com\/ui\/#\/search\/RmC8kAUGZbXrkj2rOFmUxmHtzINUgfJnFplh3QWY\", \"ComplianceStatus\": \"FAILED\", \"Rows\": \"[{\\\"Timeslice\\\":1545042427000,\\\"finding_time\\\":\\\"1545042427000\\\",\\\"item_name\\\":\\\"A nice dashboard.png\\\",\\\"title\\\":\\\"Vulnerability: Apple iTunes m3u Playlist File Title Parsing Buffer Overflow Vulnerability(34886) found on 207.235.176.3\\\",\\\"resource_id\\\":\\\"10.178.11.43\\\",\\\"resource_type\\\":\\\"Other\\\"},{\\\"Timeslice\\\":\\\"1545042427000\\\",\\\"finding_time\\\":\\\"1545042427000\\\",\\\"item_name\\\":\\\"Screen Shot 2014-07-30 at 11.39.29 PM.png\\\",\\\"title\\\":\\\"PCI Req 01: Traffic to Cardholder Environment: Direct external traffic to secure port on 10.178.11.43\\\",\\\"resource_id\\\":\\\"10.178.11.42\\\",\\\"resource_type\\\":\\\"AwsEc2Instance\\\"},{\\\"Timeslice\\\":\\\"1545042427000\\\",\\\"finding_time\\\":\\\"1545042427000\\\",\\\"item_name\\\":\\\"10388049_589057504526630_2031213996_n.jpg\\\",\\\"title\\\":\\\"Test Check Success for 207.235.176.5\\\",\\\"resource_id\\\":\\\"10.178.11.41\\\",\\\"resource_type\\\":\\\"Other\\\"}]\"}"
+}
diff --git a/securityhub-org/sumologic-securityhub-collector-awsorg/LICENSE b/securityhub-org/sumologic-securityhub-collector-awsorg/LICENSE
new file mode 100644
index 0000000..ba07b59
--- /dev/null
+++ b/securityhub-org/sumologic-securityhub-collector-awsorg/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2021 Sumo Logic Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/securityhub-org/sumologic-securityhub-collector-awsorg/README.md b/securityhub-org/sumologic-securityhub-collector-awsorg/README.md
new file mode 100644
index 0000000..ac99233
--- /dev/null
+++ b/securityhub-org/sumologic-securityhub-collector-awsorg/README.md
@@ -0,0 +1,33 @@
+# sumologic-securityhub-collector-aws-org
+
+This solution consists of a Lambda function that is triggered by CloudWatch events with Security Hub findings as the payload; the findings are then ingested into a Sumo Logic HTTP source endpoint.
+
+
+Made with ❤️ by Sumo Logic. Available on the [AWS Serverless Application Repository](https://aws.amazon.com/serverless)
+
+
+
+## Setup
+
+
+1. Configure a [Hosted Collector](https://help.sumologic.com/03Send-Data/Hosted-Collectors/Configure-a-Hosted-Collector) and an HTTP Logs source in Sumo Logic. In the source's Advanced Options for Logs, under Timestamp Format, click Specify a format and enter the following (a snippet for verifying the endpoint is shown after these steps):
+    - Format: `yyyy-MM-dd'T'HH:mm:ss.SSS'Z'`
+    - Timestamp locator: `.*"UpdatedAt":"(.*)".*`
+
+2. Deploy the SAM application:
+    1. Open a browser window and go to https://serverlessrepo.aws.amazon.com/applications.
+    2. In the Serverless Application Repository, search for sumologic.
+    3. Select the Show apps that create custom IAM roles or resource policies check box.
+    4. Click the sumologic-securityhub-collector-awsorg link, and then click Deploy.
+    5. In the Configure application parameters panel, enter the HTTP source endpoint configured in step 1, and then click Deploy.
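+
+To sanity-check the HTTP source endpoint, you can post a sample finding to it directly. The snippet below is a minimal sketch and not part of this project; `SUMO_ENDPOINT` is a placeholder for the HTTP source URL created in step 1, and the payload mirrors the JSON format the Lambda function sends:
+
+```python
+import json
+import requests
+
+# Placeholder: replace with the HTTP source URL from your hosted collector.
+SUMO_ENDPOINT = "https://collectors.sumologic.com/receiver/v1/http/XXXX"
+
+# A minimal finding-like record; the UpdatedAt field is what the timestamp locator above matches.
+finding = {"Id": "test-finding", "UpdatedAt": "2019-04-18T14:51:55.000000Z"}
+
+resp = requests.post(
+    SUMO_ENDPOINT,
+    data=json.dumps(finding),
+    headers={"Content-Type": "application/json", "Accept": "application/json"},
+)
+resp.raise_for_status()
+print("Posted test finding, status:", resp.status_code)
+```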
+
+
+## License
+
+Apache License 2.0 (Apache-2.0)
+
+
+## Support
+Requests & issues should be filed on GitHub: https://github.com/SumoLogic/sumologic-aws-lambda/issues
+
diff --git a/securityhub-org/sumologic-securityhub-collector-awsorg/images/sumologic-securityhub-collector-org.png b/securityhub-org/sumologic-securityhub-collector-awsorg/images/sumologic-securityhub-collector-org.png
new file mode 100644
index 0000000..6543f21
Binary files /dev/null and b/securityhub-org/sumologic-securityhub-collector-awsorg/images/sumologic-securityhub-collector-org.png differ
diff --git a/securityhub-org/sumologic-securityhub-collector-awsorg/sam/create_src.sh b/securityhub-org/sumologic-securityhub-collector-awsorg/sam/create_src.sh
new file mode 100755
index 0000000..574fbfd
--- /dev/null
+++ b/securityhub-org/sumologic-securityhub-collector-awsorg/sam/create_src.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+
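+# Packages the Lambda source for SAM: copies ../../src into a temporary sam/src directory,
+# installs the Python dependencies from ../requirements.txt, and writes the bundle to
+# ../../src/src.zip. Assumed usage: run this script from the sam/ directory (./create_src.sh),
+# since all paths below are relative to it.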
+mkdir src
+ret=$?
+cd src
+cp -r ../../src/ .
+if [ "$ret" == "1" ]
+then
+ echo "src/ directory present in sam/ directory"
+else
+ echo "src/ directory created in sam/ directory"
+ pip install -r ../requirements.txt -t ./
+ chmod -R 755 .
+fi
+rm -f src.zip
+zip -r src.zip .
+mv src.zip ../../src/src.zip
+echo "please delete sam/src directory manually"
+cd ..
diff --git a/securityhub-org/sumologic-securityhub-collector-awsorg/sam/packaged.yaml b/securityhub-org/sumologic-securityhub-collector-awsorg/sam/packaged.yaml
new file mode 100644
index 0000000..6b33b55
--- /dev/null
+++ b/securityhub-org/sumologic-securityhub-collector-awsorg/sam/packaged.yaml
@@ -0,0 +1,66 @@
+AWSTemplateFormatVersion: '2010-09-09'
+Transform: AWS::Serverless-2016-10-31
+Description: 'This solution consists of a lambda function which gets triggered
+ by CloudWatch events with findings as payload which are then ingested to Sumo Logic
+
+ '
+Globals:
+ Function:
+ Timeout: 300
+Metadata:
+ AWS::ServerlessRepo::Application:
+ Author: Sumo Logic
+    Description: This solution consists of a lambda function which gets triggered
+ by CloudWatch events with findings as payload which are then ingested to Sumo
+ Logic via HTTP source.
+ HomePageUrl: https://github.com/SumoLogic/sumologic-aws-lambda
+ Labels:
+ - sumologic
+ - serverless
+ - security
+ - cloudwatchevents
+ - securityhub
+ Name: sumologic-securityhub-connector-aws-org
+ LicenseUrl: s3://appdevstore/SecurityHubCollectorAWSOrg/v1.0.8/6092dd6c323e33634657102f570628e0
+ ReadmeUrl: s3://appdevstore/SecurityHubCollectorAWSOrg/v1.0.8/eb911876461845db8f3113dbace6d454
+ SemanticVersion: 1.0.8
+ SourceCodeUrl: https://github.com/SumoLogic/sumologic-aws-lambda/tree/main/securityhub-org/sumologic-securityhub-collector-awsorg/src
+ SpdxLicenseId: Apache-2.0
+Parameters:
+ SumoEndpoint:
+ Description: SumoLogic Endpoint to post logs
+ Type: String
+Resources:
+ SecurityHubCollectorAwsOrg:
+ Metadata:
+ SkipBuild: true
+ cfn_nag:
+ rules_to_suppress:
+ - id: W89
+ reason: Lambda functions should be deployed inside a VPC
+ SamResourceId: SecurityHubCollectorAwsOrg
+ Type: AWS::Serverless::Function
+ Properties:
+ Handler: securityhub_collector_org.lambda_handler
+ Runtime: python3.13
+ CodeUri: s3://appdevstore/SecurityHubCollectorAWSOrg/v1.0.8/72d8a9923335d52f948cbd3f99d8b91d
+ MemorySize: 128
+ Timeout: 300
+ Environment:
+ Variables:
+ SUMO_ENDPOINT:
+ Ref: SumoEndpoint
+ Events:
+ CloudWatchEventTrigger:
+ Type: CloudWatchEvent
+ Properties:
+ Pattern:
+ source:
+ - aws.securityhub
+Outputs:
+ SecurityHubCollectorAwsOrg:
+ Description: SecurityHubCollectorAwsOrg Function ARN
+ Value:
+ Fn::GetAtt:
+ - SecurityHubCollectorAwsOrg
+ - Arn
diff --git a/securityhub-org/sumologic-securityhub-collector-awsorg/sam/requirements.txt b/securityhub-org/sumologic-securityhub-collector-awsorg/sam/requirements.txt
new file mode 100644
index 0000000..8eb3760
--- /dev/null
+++ b/securityhub-org/sumologic-securityhub-collector-awsorg/sam/requirements.txt
@@ -0,0 +1,2 @@
+# boto3==1.27.1
+requests==2.32.5
diff --git a/securityhub-org/sumologic-securityhub-collector-awsorg/sam/sam_package.sh b/securityhub-org/sumologic-securityhub-collector-awsorg/sam/sam_package.sh
new file mode 100755
index 0000000..4998cb1
--- /dev/null
+++ b/securityhub-org/sumologic-securityhub-collector-awsorg/sam/sam_package.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+export AWS_PROFILE="prod"
+if [ "$AWS_PROFILE" == "prod" ]
+then
+ SAM_S3_BUCKET="appdevstore"
+ AWS_REGION="us-east-1"
+else
+ SAM_S3_BUCKET=""
+ AWS_REGION="us-east-1"
+fi
+
+version="1.0.8"
+
+echo "Creating package.yaml"
+sam package --template-file template.yaml --s3-bucket $SAM_S3_BUCKET --output-template-file packaged.yaml --s3-prefix "SecurityHubCollectorAWSOrg/v"$version --region $AWS_REGION --profile $AWS_PROFILE
+
+if [ $? -ne 0 ]
+then
+ echo "Creating package command failed!"
+ exit 1
+else
+ echo "package.yaml created"
+fi
+
+echo "Publishing sumologic-securityhub-connector-aws-org "$version
+sam publish --template packaged.yaml --region $AWS_REGION --semantic-version $version
+
+echo "Published sumologic-securityhub-connector-aws-org "$version
+
+# sam deploy --template-file packaged.yaml --stack-name testingsechubawsorg --capabilities CAPABILITY_IAM --region $AWS_REGION --parameter-overrides ParameterKey=SumoEndpoint,ParameterValue=https://collectors.sumologic.com/receiver/v1/http/ZaVnC4dhaV29FhnR-VQyA9mpray7QOE0aRQrtZnuNmMQ0DKr9ZVMGY5WIa0IWSjt_LkiUSjI71WGiDHRHStqwCApBp_49e_W-b6gM0_KnZlxBUBe-1yTFw==
+
+#aws --profile awsorg cloudformation describe-stack-events --stack-name testingsecurityhubcollectorawsorg --region $AWS_REGION
+#aws --profile awsorg cloudformation get-template --stack-name testingsecurityhubcollectorawsorg --region $AWS_REGION
diff --git a/securityhub-org/sumologic-securityhub-collector-awsorg/sam/template.yaml b/securityhub-org/sumologic-securityhub-collector-awsorg/sam/template.yaml
new file mode 100644
index 0000000..68a50de
--- /dev/null
+++ b/securityhub-org/sumologic-securityhub-collector-awsorg/sam/template.yaml
@@ -0,0 +1,61 @@
+AWSTemplateFormatVersion: '2010-09-09'
+Transform: 'AWS::Serverless-2016-10-31'
+Description: >
+  This solution consists of a lambda function which gets triggered by CloudWatch events with findings as payload which are then ingested to Sumo Logic
+
+Globals:
+ Function:
+ Timeout: 300
+Metadata:
+ AWS::ServerlessRepo::Application:
+ Author: Sumo Logic
+    Description: This solution consists of a lambda function which gets triggered by CloudWatch events with findings as payload which are then ingested to Sumo Logic via HTTP source.
+ HomePageUrl: https://github.com/SumoLogic/sumologic-aws-lambda
+ Labels:
+ - sumologic
+ - serverless
+ - security
+ - cloudwatchevents
+ - securityhub
+ Name: sumologic-securityhub-connector-aws-org
+ LicenseUrl: ../LICENSE
+ ReadmeUrl: ../README.md
+ SemanticVersion: 1.0.8
+ SourceCodeUrl: https://github.com/SumoLogic/sumologic-aws-lambda/tree/main/securityhub-org/sumologic-securityhub-collector-awsorg/src
+ SpdxLicenseId: Apache-2.0
+
+Parameters:
+ SumoEndpoint:
+ Description: "SumoLogic Endpoint to post logs"
+ Type: "String"
+
+Resources:
+
+ SecurityHubCollectorAwsOrg:
+ Metadata:
+ SkipBuild: True
+ cfn_nag:
+ rules_to_suppress:
+ - id: W89
+ reason: "Lambda functions should be deployed inside a VPC"
+ Type: 'AWS::Serverless::Function'
+ Properties:
+ Handler: securityhub_collector_org.lambda_handler
+ Runtime: python3.13
+ CodeUri: ../src/src.zip
+ MemorySize: 128
+ Timeout: 300
+ Environment:
+ Variables:
+ SUMO_ENDPOINT: !Ref SumoEndpoint
+ Events:
+ CloudWatchEventTrigger:
+ Type: CloudWatchEvent
+ Properties:
+ Pattern:
+ source:
+ - aws.securityhub
+Outputs:
+ SecurityHubCollectorAwsOrg:
+ Description: "SecurityHubCollectorAwsOrg Function ARN"
+ Value: !GetAtt SecurityHubCollectorAwsOrg.Arn
diff --git a/securityhub-org/sumologic-securityhub-collector-awsorg/src/__init__.py b/securityhub-org/sumologic-securityhub-collector-awsorg/src/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/securityhub-org/sumologic-securityhub-collector-awsorg/src/securityhub_collector_org.py b/securityhub-org/sumologic-securityhub-collector-awsorg/src/securityhub_collector_org.py
new file mode 100644
index 0000000..a2f6e83
--- /dev/null
+++ b/securityhub-org/sumologic-securityhub-collector-awsorg/src/securityhub_collector_org.py
@@ -0,0 +1,59 @@
+import json
+import os
+import logging
+import sys
+import requests
+sys.path.insert(0, '/opt') # layer packages are in opt directory
+from collections import defaultdict
+
+SUMO_ENDPOINT = os.getenv("SUMO_ENDPOINT")
+logger = logging.getLogger()
+logger.setLevel(logging.INFO)
+session = requests.Session()
+headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
+
+def post_to_sumo(findings, silent=False):
+
+ findings_data = "\n\n".join([json.dumps(data) for data in findings])
+    is_success = False
+    try:
+        logger.info("findings_data: %s" % findings_data)
+        r = session.post(SUMO_ENDPOINT, data=findings_data, headers=headers)
+        r.raise_for_status()
+        is_success = True
+    except Exception as e:
+        logger.error("Failed to post findings to Sumo: %s" % str(e))
+        if not silent:
+            raise e
+
+    return is_success
+
+
+def send_findings(findings, context):
+
+ count = 0
+ if len(findings) > 0:
+ finding_buckets = defaultdict(list)
+ for f in findings:
+ finding_buckets[f['ProductArn']].append(f)
+ count += 1
+
+ for product_arn, finding_list in finding_buckets.items():
+ post_to_sumo(finding_list)
+
+ logger.info("Finished Sending NumFindings: %d" % (count))
+
+
+def lambda_handler(event, context):
+ logger.info("Invoking SecurityHubCollector source %s region %s" % (event['source'], event['region']))
+ findings = event['detail'].get('findings', [])
+ send_findings(findings, context)
+
+
+if __name__ == '__main__':
+
+    # Local test harness: load the sample Security Hub event shipped with this project.
+    event = json.load(open('../test/testevent.json'))
+
+ class context:
+ aws_request_id = "testid12323"
+
+ lambda_handler(event, context)
diff --git a/securityhub-org/sumologic-securityhub-collector-awsorg/src/src.zip b/securityhub-org/sumologic-securityhub-collector-awsorg/src/src.zip
new file mode 100644
index 0000000..7d89d72
Binary files /dev/null and b/securityhub-org/sumologic-securityhub-collector-awsorg/src/src.zip differ
diff --git a/securityhub-org/sumologic-securityhub-collector-awsorg/test/testevent.json b/securityhub-org/sumologic-securityhub-collector-awsorg/test/testevent.json
new file mode 100644
index 0000000..0c73b31
--- /dev/null
+++ b/securityhub-org/sumologic-securityhub-collector-awsorg/test/testevent.json
@@ -0,0 +1,54 @@
+{
+ "account": "956882702234",
+ "detail": {
+ "findings": [
+ {
+ "AwsAccountId": "956882702234",
+ "Compliance": {
+ "Status": "FAILED"
+ },
+ "CreatedAt": "2019-04-18T14:51:55.000000Z",
+ "Description": "This search gives top 10 resources which are accessed in last 15 minutes",
+ "FirstObservedAt": "2019-04-18T14:51:55.000000Z",
+ "GeneratorId": "InsertFindingsScheduledSearch",
+ "Id": "sumologic:us-east-2:956882702234:InsertFindingsScheduledSearch/finding/eb083fb9-03aa-4840-af0e-eb3ae4adebe9",
+ "ProductArn": "arn:aws:securityhub:us-east-2:956882702234:product/sumologicinc/sumologic-mda",
+ "ProductFields": {
+ "aws/securityhub/CompanyName": "Sumo Logic",
+ "aws/securityhub/FindingId": "arn:aws:securityhub:us-east-2:956882702234:product/sumologicinc/sumologic-mda/sumologic:us-east-2:956882702234:InsertFindingsScheduledSearch/finding/eb083fb9-03aa-4840-af0e-eb3ae4adebe9",
+ "aws/securityhub/ProductName": "Machine Data Analytics",
+ "aws/securityhub/SeverityLabel": "LOW"
+ },
+ "RecordState": "ACTIVE",
+ "Resources": [
+ {
+ "Id": "10.178.11.43",
+ "Type": "Other"
+ }
+ ],
+ "SchemaVersion": "2018-10-08",
+ "Severity": {
+ "Normalized": 30
+ },
+ "SourceUrl": "https://service.sumologic.com/ui/#/search/RmC8kAUGZbXrkj2rOFmUxmHtzINUgfJnFplh3QWY",
+ "Title": "Vulnerability: Apple iTunes m3u Playlist File Title Parsing Buffer Overflow Vulnerability(34886) found on 207.235.176.3",
+ "Types": [
+ "Software and Configuration Checks/Industry and Regulatory Standards/HIPAA Controls"
+ ],
+ "UpdatedAt": "2019-04-18T14:51:55.000000Z",
+ "WorkflowState": "NEW",
+ "approximateArrivalTimestamp": 1555599782.881,
+ "updatedAt": "2019-04-18T14:51:55.000000Z"
+ }
+ ]
+ },
+ "detail-type": "Security Hub Findings",
+ "id": "f06f61e9-b099-8321-e446-5a20583bd791",
+ "region": "us-east-2",
+ "resources": [
+ "arn:aws:securityhub:us-east-2:956882702234:product/sumologicinc/sumologic-mda/sumologic:us-east-2:956882702234:InsertFindingsScheduledSearch/finding/eb083fb9-03aa-4840-af0e-eb3ae4adebe9"
+ ],
+ "source": "aws.securityhub",
+ "time": "2019-04-18T15:03:04Z",
+ "version": "0"
+}
diff --git a/sumologic-app-utils/Test/SampleTemplate.yaml b/sumologic-app-utils/Test/SampleTemplate.yaml
index 306ae86..5c685fa 100644
--- a/sumologic-app-utils/Test/SampleTemplate.yaml
+++ b/sumologic-app-utils/Test/SampleTemplate.yaml
@@ -48,7 +48,7 @@ Resources:
Properties:
Location:
ApplicationId: arn:aws:serverlessrepo:us-east-1:956882708938:applications/sumologic-app-utils
- SemanticVersion: 2.0.6
+ SemanticVersion: 2.0.21
SumoLogicHelperPolicy:
Type: AWS::IAM::Policy
@@ -118,7 +118,7 @@ Resources:
DependsOn: SumoLogicHelperPolicy
Type: AWS::S3::BucketPolicy
Properties:
- Bucket: "cf-templates-1qpf3unpuo1hw-us-east-2"
+ Bucket: "cf-templates-1xa70np4sllja-us-east-1"
PolicyDocument:
Statement:
- Sid: AWSCloudTrailAclCheck
@@ -127,14 +127,14 @@ Resources:
Service: cloudtrail.amazonaws.com
Action: s3:GetBucketAcl
Resource:
- - "arn:aws:s3:::cf-templates-1qpf3unpuo1hw-us-east-2"
+ - "arn:aws:s3:::cf-templates-1xa70np4sllja-us-east-1"
- Sid: AWSCloudTrailWrite
Effect: Allow
Principal:
Service: cloudtrail.amazonaws.com
Action: s3:PutObject
Resource:
- - "arn:aws:s3:::cf-templates-1qpf3unpuo1hw-us-east-2/*"
+ - "arn:aws:s3:::cf-templates-1xa70np4sllja-us-east-1/*"
Condition:
StringEquals:
s3:x-amz-acl: bucket-owner-full-control
@@ -144,7 +144,7 @@ Resources:
Service: cloudtrail.amazonaws.com
Action: s3:ListBucket
Resource:
- - "arn:aws:s3:::cf-templates-1qpf3unpuo1hw-us-east-2"
+ - "arn:aws:s3:::cf-templates-1xa70np4sllja-us-east-1"
SumoCloudTrail:
Type: Custom::AWSTrail
@@ -153,7 +153,7 @@ Resources:
ServiceToken: !GetAtt SumoAppUtils.Outputs.SumoAppUtilsFunction
IsLogging: true
IsMultiRegionTrail: false
- S3BucketName: "cf-templates-1qpf3unpuo1hw-us-east-2"
+ S3BucketName: "cf-templates-1xa70np4sllja-us-east-1"
TrailName: "Aws-Observability-onemoreupdae"
RemoveOnDeleteStack: !Ref Section1eRemoveSumoResourcesOnDeleteStack
@@ -184,6 +184,38 @@ Resources:
SumoAccessKey: !Ref Section1cSumoAccessKey
SumoDeployment: !Ref Section1aSumoDeployment
+ KinesisFirehoseMetricsSource:
+ Type: Custom::HTTPSource
+ Properties:
+ ServiceToken: !GetAtt SumoAppUtils.Outputs.SumoAppUtilsFunction
+ Region: !Ref "AWS::Region"
+ SourceType: "KinesisMetric"
+ SourceName: "SourabhtestingKinesisFirehoseMetricsSource"
+ RemoveOnDeleteStack: !Ref Section1eRemoveSumoResourcesOnDeleteStack
+ SourceCategory: "TestingCategory"
+ CollectorId: !GetAtt SumoHostedCollector.COLLECTOR_ID
+ SumoAccessID: !Ref Section1bSumoAccessID
+ SumoAccessKey: !Ref Section1cSumoAccessKey
+ SumoDeployment: !Ref Section1aSumoDeployment
+ RoleArn: !GetAtt SumoRole.Arn
+
+ KinesisFirehoseLogsSource:
+ Type: Custom::HTTPSource
+ Properties:
+ ServiceToken: !GetAtt SumoAppUtils.Outputs.SumoAppUtilsFunction
+ Region: !Ref "AWS::Region"
+ SourceType: "KinesisLog"
+ SourceName: "SourabhtestingKinesisFirehoseLogsSource"
+ RemoveOnDeleteStack: !Ref Section1eRemoveSumoResourcesOnDeleteStack
+ SourceCategory: "TestingCategory"
+ CollectorId: !GetAtt SumoHostedCollector.COLLECTOR_ID
+ SumoAccessID: !Ref Section1bSumoAccessID
+ SumoAccessKey: !Ref Section1cSumoAccessKey
+ SumoDeployment: !Ref Section1aSumoDeployment
+ Fields:
+ namespace: "aws/lambda"
+ region: !Ref "AWS::Region"
+
SumoRole:
Type: AWS::IAM::Role
Properties:
@@ -209,6 +241,9 @@ Resources:
- s3:GetObjectVersion
- s3:ListBucketVersions
- s3:ListBucket
+ - tag:GetResources
+ - cloudwatch:ListMetrics
+ - cloudwatch:GetMetricStatistics
Resource:
"*"
@@ -225,7 +260,7 @@ Resources:
SumoAccessID: !Ref Section1bSumoAccessID
SumoAccessKey: !Ref Section1cSumoAccessKey
SumoDeployment: !Ref Section1aSumoDeployment
- TargetBucketName: "cf-templates-1qpf3unpuo1hw-us-east-1"
+ TargetBucketName: "cf-templates-1xa70np4sllja-us-east-1"
PathExpression: "asdasd"
DateFormat: "yyyy-MM-dd'T'HH:mm:ss.SSS'Z'"
DateLocatorRegex: '.*"updatedAt":"(.*)".*'
@@ -244,6 +279,7 @@ Resources:
SumoAccessID: !Ref Section1bSumoAccessID
SumoAccessKey: !Ref Section1cSumoAccessKey
SumoDeployment: !Ref Section1aSumoDeployment
+ orgid: !Ref Section1dSumoOrganizationId
sumoAppWithCustomURL:
Type: Custom::App
@@ -256,7 +292,8 @@ Resources:
SumoAccessID: !Ref Section1bSumoAccessID
SumoAccessKey: !Ref Section1cSumoAccessKey
SumoDeployment: !Ref Section1aSumoDeployment
- AppJsonS3Url: "https://sumologic-appdev-aws-sam-apps.s3.amazonaws.com/aws-observability-versions/v2.1.0/appjson/Alb-App.json"
+ AppJsonS3Url: "https://sumologic-appdev-aws-sam-apps.s3.amazonaws.com/aws-observability-versions/v2.13.0/appjson/Alb-App.json"
+ orgid: !Ref Section1dSumoOrganizationId
sumoAppwithUID:
Type: Custom::App
@@ -273,6 +310,7 @@ Resources:
SumoAccessID: !Ref Section1bSumoAccessID
SumoAccessKey: !Ref Section1cSumoAccessKey
SumoDeployment: !Ref Section1aSumoDeployment
+ orgid: !Ref Section1dSumoOrganizationId
CreateSumoLogicAWSExplorerView:
Type: Custom::SumoLogicAWSExplorer
@@ -304,14 +342,14 @@ Resources:
Properties:
ServiceToken: !GetAtt SumoAppUtils.Outputs.SumoAppUtilsFunction
Region: !Ref "AWS::Region"
- SourceApiUrl: "https://api.us2.sumologic.com/api/v1/collectors/194268335/sources/1135631121"
+ SourceApiUrl: "https://api.sumologic.com/api/v1/collectors/268544623/sources/1621987448"
RemoveOnDeleteStack: !Ref Section1eRemoveSumoResourcesOnDeleteStack
SumoAccessID: !Ref Section1bSumoAccessID
SumoAccessKey: !Ref Section1cSumoAccessKey
SumoDeployment: !Ref Section1aSumoDeployment
Fields:
- account: "fsdsd"
- region: "asfasf"
+ account: "sumo1"
+ region: "us-test-1"
APIGatewayFieldExtractionRule:
Type: Custom::SumoLogicFieldExtractionRule
@@ -364,9 +402,9 @@ Resources:
Properties:
ServiceToken: !GetAtt SumoAppUtils.Outputs.SumoAppUtilsFunction
AWSResource: "vpc"
- BucketName: "cf-templates-1qpf3unpuo1hw-us-east-1"
+ BucketName: "cf-templates-1xa70np4sllja-us-east-1"
Filter: ".*"
- BucketPrefix: "djvsdvsbdjb"
+ BucketPrefix: "vpc/logs"
AccountID: !Ref "AWS::AccountId"
RemoveOnDeleteStack: !Ref Section1eRemoveSumoResourcesOnDeleteStack
@@ -375,8 +413,8 @@ Resources:
Properties:
ServiceToken: !GetAtt SumoAppUtils.Outputs.SumoAppUtilsFunction
Region: !Ref "AWS::Region"
- FolderName: !Sub "Test Monitors"
- MonitorsS3Url: !Sub "https://sumologic-appdev-aws-sam-apps.s3.amazonaws.com/aws-observability-versions/v2.1.0/appjson/Alerts-App.json"
+ FolderName: "Test Monitors"
+ MonitorsS3Url: "https://sumologic-appdev-aws-sam-apps.s3.amazonaws.com/aws-observability-versions/v2.1.0/appjson/Alerts-App.json"
SuffixDateTime: true
RetainOldAlerts: true
RemoveOnDeleteStack: !Ref Section1eRemoveSumoResourcesOnDeleteStack
diff --git a/sumologic-app-utils/build.sh b/sumologic-app-utils/build.sh
new file mode 100755
index 0000000..023337b
--- /dev/null
+++ b/sumologic-app-utils/build.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+# Pull the AWS Lambda Python base image from Amazon ECR Public
+docker pull public.ecr.aws/lambda/python:3.13-x86_64
+
+# Start the Lambda base image as a long-running container (the handler argument is only a placeholder)
+docker run -d --name sumologic-app-utils public.ecr.aws/lambda/python:3.13-x86_64 lambda_function.lambda_handler
+
+# Install dependencies inside the container
+docker exec -it sumologic-app-utils /bin/bash -c "dnf install -y zip"
+
+# Create a virtual environment and install dependencies
+docker exec -it sumologic-app-utils /bin/bash -c "python3 -m venv temp-venv && source temp-venv/bin/activate && mkdir sumo_app_utils && cd sumo_app_utils && pip install crhelper jsonschema requests retrying -t ."
+
+# Copy the Python source files from the host into the container
+docker cp src/. sumologic-app-utils:/var/task/sumo_app_utils
+
+# Zip the contents of the sumologic-app-utils directory
+docker exec -it sumologic-app-utils /bin/bash -c "cd sumo_app_utils && ls -l && zip -r ../sumo_app_utils.zip ."
+
+# Copy sumo_app_utils.zip from the container back to the host
+docker cp sumologic-app-utils:/var/task/sumo_app_utils.zip ./sumo_app_utils.zip
+
+# Stop and remove the container
+docker stop sumologic-app-utils
+docker rm sumologic-app-utils
\ No newline at end of file
diff --git a/sumologic-app-utils/deploy.sh b/sumologic-app-utils/deploy.sh
old mode 100644
new mode 100755
index d0bf395..da8f3ba
--- a/sumologic-app-utils/deploy.sh
+++ b/sumologic-app-utils/deploy.sh
@@ -1,6 +1,8 @@
#!/bin/bash
-if [ "$AWS_PROFILE" != "prod" ]
+export AWS_PROFILE="prod"
+export AWS_REGION="us-east-1"
+if [ "$AWS_PROFILE" == "prod" ]
then
SAM_S3_BUCKET="appdevstore"
AWS_REGION="us-east-1"
@@ -13,25 +15,29 @@ rm src/external/*.pyc
rm src/*.pyc
rm sumo_app_utils.zip
-if [ ! -f sumo_app_utils.zip ]; then
- echo "creating zip file"
- mkdir python
- cd python
- pip install crhelper -t .
- pip install requests -t .
- pip install retrying -t .
- cp -v ../src/*.py .
- zip -r ../sumo_app_utils.zip .
- cd ..
- rm -r python
-fi
+#if [ ! -f sumo_app_utils.zip ]; then
+# echo "creating zip file"
+# mkdir python
+# cd python
+# pip3 install crhelper -t .
+# pip3 install jsonschema==4.17.3 -t .
+# pip3 install requests -t .
+# pip3 install retrying -t .
+# cp -v ../src/*.py .
+# zip -r ../sumo_app_utils.zip .
+# cd ..
+# rm -r python
+#fi
+
+./build.sh
-version="2.0.6"
+version="2.0.21"
aws s3 cp sumo_app_utils.zip s3://$SAM_S3_BUCKET/sumo_app_utils/v"$version"/sumo_app_utils.zip --region $AWS_REGION --acl public-read
-sam package --template-file sumo_app_utils.yaml --s3-bucket $SAM_S3_BUCKET --output-template-file packaged_sumo_app_utils.yaml --s3-prefix "sumo_app_utils/v"$version
+sam package --template-file sumo_app_utils.yaml --s3-bucket $SAM_S3_BUCKET --output-template-file packaged_sumo_app_utils.yaml --s3-prefix "sumo_app_utils/v"$version --region $AWS_REGION --profile $AWS_PROFILE
sam publish --template packaged_sumo_app_utils.yaml --region $AWS_REGION --semantic-version $version
-# sam deploy --template-file packaged_sumo_app_utils.yaml --stack-name testingsumoapputils --capabilities CAPABILITY_IAM --region $AWS_REGION
+
+#sam deploy --template-file packaged_sumo_app_utils.yaml --stack-name testingsumoapputils --capabilities CAPABILITY_IAM --region $AWS_REGION
diff --git a/sumologic-app-utils/packaged_sumo_app_utils.yaml b/sumologic-app-utils/packaged_sumo_app_utils.yaml
index 7a69f2f..bb0bff0 100644
--- a/sumologic-app-utils/packaged_sumo_app_utils.yaml
+++ b/sumologic-app-utils/packaged_sumo_app_utils.yaml
@@ -20,17 +20,17 @@ Metadata:
- sumologic
- serverless
Name: sumologic-app-utils
- SemanticVersion: 2.0.6
- SourceCodeUrl: https://github.com/SumoLogic/sumologic-aws-lambda/tree/master/sumologic-app-utils
+ SemanticVersion: 2.0.21
+ SourceCodeUrl: https://github.com/SumoLogic/sumologic-aws-lambda/tree/main/sumologic-app-utils
SpdxLicenseId: Apache-2.0
- ReadmeUrl: s3://appdevstore/sumo_app_utils/v2.0.6/4d5a92c06a7fa9d956a900e51a1f6be4
+ ReadmeUrl: s3://appdevstore/sumo_app_utils/v2.0.21/4d5a92c06a7fa9d956a900e51a1f6be4
Resources:
SumoAppUtilsFunction:
Type: AWS::Serverless::Function
Properties:
Handler: main.handler
- Runtime: python3.7
- CodeUri: s3://appdevstore/sumo_app_utils/v2.0.6/sumo_app_utils.zip
+ Runtime: python3.13
+ CodeUri: s3://appdevstore/sumo_app_utils/v2.0.21/sumo_app_utils.zip
MemorySize: 128
Timeout: 300
Policies:
@@ -43,6 +43,8 @@ Resources:
Effect: Allow
Resource: arn:aws:cloudtrail:*:*:*
Sid: CreateCloudTrailPolicy
+ Metadata:
+ SamResourceId: SumoAppUtilsFunction
Outputs:
SumoAppUtilsFunction:
Description: SumoAppUtils Function ARN
diff --git a/sumologic-app-utils/src/awsresource.py b/sumologic-app-utils/src/awsresource.py
index 237a99e..b04b82f 100644
--- a/sumologic-app-utils/src/awsresource.py
+++ b/sumologic-app-utils/src/awsresource.py
@@ -202,7 +202,7 @@ def extract_params(self, event):
class EnableS3LogsResources(AWSResource):
def __init__(self, props, *args, **kwargs):
- print('Enabling S3 for ALB aws resource %s' % props.get("AWSResource"))
+ print('Enabling S3 for ALB/ELB-classic aws resource %s' % props.get("AWSResource"))
def _s3_logs_alb_resources(self, region_value, aws_resource, bucket_name, bucket_prefix,
delete_flag, filter_regex, region_account_id, account_id):
@@ -212,8 +212,10 @@ def _s3_logs_alb_resources(self, region_value, aws_resource, bucket_name, bucket
# Fetch and Filter the Resources.
resources = tag_resource.fetch_resources()
- filtered_resources = tag_resource.filter_resources(filter_regex, resources)
-
+        if aws_resource != 'elb':
+            filtered_resources = tag_resource.filter_resources(filter_regex, resources)
+        else:
+            filtered_resources = resources
if filtered_resources:
# Get the ARNs for all resources
arns = tag_resource.get_arn_list(filtered_resources)
@@ -391,22 +393,35 @@ def enable_s3_logs(event, context):
account_id = os.environ.get("AccountID")
filter_regex = os.environ.get("Filter")
region_account_id = os.environ.get("RegionAccountId")
-
+ is_elbClassic = False
if "detail" in event:
event_detail = event.get("detail")
event_name = event_detail.get("eventName")
+        # Classic ELB CreateLoadBalancer events carry loadBalancerName in requestParameters.
+        request_parameters = event_detail.get("requestParameters") or {}
+        if event_name == "CreateLoadBalancer" and request_parameters.get("loadBalancerName") is not None:
+            is_elbClassic = True
+            event_name = "ELBClassicCreate"
region_value = event_detail.get("awsRegion")
# Get the class instance based on Cloudtrail Event Name
- alb_resource = AWSResourcesProvider.get_provider(event_name, region_value, account_id)
- event_detail = alb_resource.filter_resources(filter_regex, event_detail)
+ if(not is_elbClassic):
+ alb_resource = AWSResourcesProvider.get_provider(event_name, region_value, account_id)
+ event_detail = alb_resource.filter_resources(filter_regex, event_detail)
- if event_detail:
- # Get the arns from the event.
- resources = alb_resource.get_arn_list_cloud_trail_event(event_detail)
+ if event_detail:
+ # Get the arns from the event.
+ resources = alb_resource.get_arn_list_cloud_trail_event(event_detail)
- # Enable S3 logging
- alb_resource.enable_s3_logs(resources, bucket_name, bucket_prefix, region_account_id)
+ # Enable S3 logging
+ alb_resource.enable_s3_logs(resources, bucket_name, bucket_prefix, region_account_id)
+ else:
+ elb_resource = AWSResourcesProvider.get_provider(event_name, region_value, account_id)
+ event_detail = elb_resource.filter_resources(filter_regex, event_detail)
+ if event_detail:
+ resources = elb_resource.get_arn_list_cloud_trail_event(event_detail)
+ elb_resource.enable_s3_logs(resources, bucket_name, bucket_prefix, region_account_id)
print("AWS S3 ENABLE ALB :- Completed s3 logs enable")
@@ -423,7 +438,8 @@ class AWSResourcesAbstract(object):
"CreateDBCluster": "rds",
"CreateDBInstance": "rds",
"CreateLoadBalancer": "elbv2",
- "CreateBucket": "s3"
+ "CreateBucket": "s3",
+ "ELBClassicCreate": "elb"
}
def __init__(self, aws_resource, region_value, account_id):
@@ -1152,6 +1168,140 @@ def disable_s3_logs(self, arns, s3_bucket):
if flow_ids:
self.client.delete_flow_logs(FlowLogIds=flow_ids)
+class ElbResource(AWSResourcesAbstract):
+ def fetch_resources(self):
+ resources = []
+ next_token = None
+ while next_token != 'END':
+ if next_token:
+ response = self.client.describe_load_balancers(PageSize=400, Marker=next_token)
+ else:
+ response = self.client.describe_load_balancers(PageSize=400)
+
+ if "LoadBalancerDescriptions" in response:
+ resources.extend(response['LoadBalancerDescriptions'])
+
+ next_token = response["NextMarker"] if "NextMarker" in response else None
+
+ if not next_token:
+ next_token = 'END'
+
+ return resources
+
+    # Classic ELBs have no ARNs, so load balancer names are used instead.
+ def get_arn_list(self, resources):
+
+ names = []
+ if resources:
+ for resource in resources:
+ names.append(resource['LoadBalancerName'])
+ return names
+
+ def process_tags(self, tags):
+ tags_key_value = []
+ for k, v in tags.items():
+ tags_key_value.append({'Key': k, 'Value': v})
+
+ return tags_key_value
+
+ def get_arn_list_cloud_trail_event(self, event_detail):
+ lb_name = []
+ request_parameters = event_detail.get("requestParameters")
+ if request_parameters and "loadBalancerName" in request_parameters:
+ lb_name.append(request_parameters.get("loadBalancerName"))
+ return lb_name
+ return None
+
+ @retry(retry_on_exception=lambda exc: isinstance(exc, ClientError), stop_max_attempt_number=10,
+ wait_exponential_multiplier=2000, wait_exponential_max=10000)
+ def tag_resources_cloud_trail_event(self, names, tags):
+ self.client.add_tags(LoadBalancerNames=names, Tags=tags)
+
+ def enable_s3_logs(self, names, s3_bucket, s3_prefix, elb_region_account_id):
+ for name in names:
+            print("Enable S3 logging for Classic ELB " + name)
+ response = self.client.describe_load_balancer_attributes(LoadBalancerName=name)
+ if "LoadBalancerAttributes" in response:
+ access_logs = response.get("LoadBalancerAttributes").get("AccessLog")
+ if(access_logs["Enabled"]==False):
+ access_logs["Enabled"]=True
+ access_logs["S3BucketName"]=s3_bucket
+ access_logs["S3BucketPrefix"]=s3_prefix
+ try:
+ self.client.modify_load_balancer_attributes(LoadBalancerName=name, LoadBalancerAttributes=response.get("LoadBalancerAttributes"))
+ time.sleep(10)
+ except ClientError as e:
+ if "Error" in e.response and "Message" in e.response["Error"] \
+ and "Access Denied for bucket" in e.response['Error']['Message']:
+ self.add_bucket_policy(s3_bucket, elb_region_account_id)
+ time.sleep(10)
+                            self.client.modify_load_balancer_attributes(LoadBalancerName=name, LoadBalancerAttributes=response.get("LoadBalancerAttributes"))
+ else:
+ raise e
+
+ def add_bucket_policy(self, bucket_name, elb_region_account_id):
+ print("Adding policy to the bucket " + bucket_name)
+ s3 = boto3.client('s3')
+ try:
+ response = s3.get_bucket_policy(Bucket=bucket_name)
+ existing_policy = json.loads(response["Policy"])
+ except ClientError as e:
+ if "Error" in e.response and "Code" in e.response["Error"] \
+ and e.response['Error']['Code'] == "NoSuchBucketPolicy":
+ existing_policy = {
+ "Version": "2012-10-17",
+ "Statement": [
+ ]
+ }
+ else:
+ raise e
+
+ bucket_policy = [{
+ 'Sid': 'AwsElbLogs',
+ 'Effect': 'Allow',
+ 'Principal': {
+ "AWS": "arn:aws:iam::" + elb_region_account_id + ":root"
+ },
+ 'Action': ['s3:PutObject'],
+ 'Resource': f'arn:aws:s3:::{bucket_name}/*'
+ },
+ {
+ "Sid": "AWSLogDeliveryAclCheck",
+ "Effect": "Allow",
+ "Principal": {
+ "Service": "delivery.logs.amazonaws.com"
+ },
+ "Action": "s3:GetBucketAcl",
+ "Resource": "arn:aws:s3:::" + bucket_name
+ },
+ {
+ "Sid": "AWSLogDeliveryWrite",
+ "Effect": "Allow",
+ "Principal": {
+ "Service": "delivery.logs.amazonaws.com"
+ },
+ "Action": "s3:PutObject",
+ "Resource": "arn:aws:s3:::" + bucket_name + "/*",
+ "Condition": {
+ "StringEquals": {
+ "s3:x-amz-acl": "bucket-owner-full-control"
+ }
+ }
+ }]
+ existing_policy["Statement"].extend(bucket_policy)
+
+ s3.put_bucket_policy(Bucket=bucket_name, Policy=json.dumps(existing_policy))
+
+ def disable_s3_logs(self, names, s3_bucket):
+ attributes = [{'Key': 'access_logs.s3.enabled', 'Value': 'false'}]
+
+ for name in names:
+ response = self.client.describe_load_balancer_attributes(LoadBalancerName=name)
+ if "LoadBalancerAttributes" in response:
+ access_logs = response.get("LoadBalancerAttributes").get("AccessLog")
+ if(access_logs["Enabled"]==True):
+ access_logs["Enabled"]=False
+ self.client.modify_load_balancer_attributes(LoadBalancerName=name, LoadBalancerAttributes=response.get("LoadBalancerAttributes"))
+ time.sleep(1)
class AWSResourcesProvider(object):
provider_map = {
@@ -1173,7 +1323,9 @@ class AWSResourcesProvider(object):
"s3": S3Resource,
"CreateBucket": S3Resource,
"vpc": VpcResource,
- "CreateVpc": VpcResource
+ "CreateVpc": VpcResource,
+ "ELBClassicCreate": ElbResource,
+ "elb": ElbResource,
}
@classmethod
diff --git a/sumologic-app-utils/src/constants.py b/sumologic-app-utils/src/constants.py
new file mode 100644
index 0000000..69ff6d7
--- /dev/null
+++ b/sumologic-app-utils/src/constants.py
@@ -0,0 +1,23 @@
+
+
+# Define the JSON schema
+AWS_TAG_FILTERS_SCHEMA = {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "type": "object",
+ "patternProperties": {
+ "^[A-Za-z0-9_/]+$": {
+ "type": "object",
+ "properties": {
+ "tags": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ }
+ },
+ "required": ["tags"],
+ "additionalProperties": False
+ }
+ },
+ "additionalProperties": False
+}
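+
+# Illustrative example only (not used by the code): a value that validates against
+# AWS_TAG_FILTERS_SCHEMA. Keys are CloudWatch namespaces (or "all"), and each entry
+# lists the tag filters to apply, e.g.:
+#   {
+#       "AWS/Lambda": {"tags": ["env=prod"]},
+#       "all": {"tags": ["owner=appdev"]}
+#   }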
diff --git a/sumologic-app-utils/src/main.py b/sumologic-app-utils/src/main.py
index 3ecae04..256ea3f 100644
--- a/sumologic-app-utils/src/main.py
+++ b/sumologic-app-utils/src/main.py
@@ -30,7 +30,7 @@ def create(event, context):
data, resource_id = resource.create(**params)
except Exception as e:
raise e
- print(data)
+ #print(data)
print(resource_id)
helper.Data.update(data)
helper.Status = "SUCCESS"
@@ -42,7 +42,7 @@ def create(event, context):
def update(event, context):
resource, resource_type, params = get_resource(event)
data, resource_id = resource.update(**params)
- print(data)
+ #print(data)
print(resource_id)
helper.Data.update(data)
helper.Status = "SUCCESS"
diff --git a/sumologic-app-utils/src/sumologic.py b/sumologic-app-utils/src/sumologic.py
index db67e08..c1b096b 100644
--- a/sumologic-app-utils/src/sumologic.py
+++ b/sumologic-app-utils/src/sumologic.py
@@ -5,17 +5,12 @@
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
-try:
- import cookielib
-except ImportError:
- import http.cookiejar as cookielib
-
DEFAULT_VERSION = 'v1'
class SumoLogic(object):
- def __init__(self, accessId, accessKey, endpoint=None, cookieFile='cookies.txt'):
+ def __init__(self, accessId, accessKey, endpoint=None):
self.session = requests.Session()
retries = Retry(total=3, backoff_factor=1, status_forcelist=[500, 502, 503, 504, 429])
adapter = HTTPAdapter(max_retries=retries)
@@ -23,15 +18,20 @@ def __init__(self, accessId, accessKey, endpoint=None, cookieFile='cookies.txt')
self.session.mount('http://', adapter)
self.session.auth = (accessId, accessKey)
self.session.headers = {'content-type': 'application/json', 'accept': 'application/json'}
- cj = cookielib.FileCookieJar(cookieFile)
- self.session.cookies = cj
if endpoint is None:
self.endpoint = self._get_endpoint()
else:
self.endpoint = endpoint
+ self.set_cookies()
if self.endpoint[-1:] == "/":
raise Exception("Endpoint should not end with a slash character")
+ def set_cookies(self):
+ url = f"{self.endpoint}/v1/"
+ print("Setting Cookies")
+ self.session.get(url)
+ return None
+
def _get_endpoint(self):
"""
SumoLogic REST API endpoint changes based on the geo location of the client.
@@ -52,19 +52,19 @@ def _get_endpoint(self):
def get_versioned_endpoint(self, version):
return self.endpoint + '/%s' % version
- def delete(self, method, params=None, version=DEFAULT_VERSION):
+ def delete(self, method, params=None, headers=None, version=DEFAULT_VERSION):
endpoint = self.get_versioned_endpoint(version)
- time.sleep(uniform(2, 5))
- r = self.session.delete(endpoint + method, params=params)
+ time.sleep(uniform(2, 4))
+ r = self.session.delete(endpoint + method, params=params, headers=headers)
if 400 <= r.status_code < 600:
r.reason = r.text
r.raise_for_status()
return r
- def get(self, method, params=None, version=DEFAULT_VERSION):
+ def get(self, method, params=None, headers=None, version=DEFAULT_VERSION):
endpoint = self.get_versioned_endpoint(version)
- time.sleep(uniform(2, 5))
- r = self.session.get(endpoint + method, params=params)
+ time.sleep(uniform(2, 4))
+ r = self.session.get(endpoint + method, params=params, headers=headers)
if 400 <= r.status_code < 600:
r.reason = r.text
r.raise_for_status()
@@ -72,7 +72,7 @@ def get(self, method, params=None, version=DEFAULT_VERSION):
def post(self, method, params, headers=None, version=DEFAULT_VERSION):
endpoint = self.get_versioned_endpoint(version)
- time.sleep(uniform(2, 5))
+ time.sleep(uniform(2, 4))
r = self.session.post(endpoint + method, data=json.dumps(params), headers=headers)
if 400 <= r.status_code < 600:
r.reason = r.text
@@ -81,7 +81,7 @@ def post(self, method, params, headers=None, version=DEFAULT_VERSION):
def put(self, method, params, headers=None, version=DEFAULT_VERSION):
endpoint = self.get_versioned_endpoint(version)
- time.sleep(uniform(2, 5))
+ time.sleep(uniform(2, 4))
r = self.session.put(endpoint + method, data=json.dumps(params), headers=headers)
if 400 <= r.status_code < 600:
r.reason = r.text
@@ -201,44 +201,52 @@ def millisectimestamp(ts):
r = self.post('/metrics/results', params)
return json.loads(r.text)
- def delete_folder(self, folder_id):
- return self.delete('/content/%s/delete' % folder_id, version='v2')
+ def delete_folder(self, folder_id, is_admin=False):
+ headers = {'isAdminMode': 'true'} if is_admin else {}
+ return self.delete('/content/%s/delete' % folder_id, headers=headers, version='v2')
- def create_folder(self, name, description, parent_folder_id):
+ def create_folder(self, name, description, parent_folder_id, is_admin=False):
+ headers = {'isAdminMode': 'true'} if is_admin else {}
content = {
"name": name,
"description": description,
"parentId": parent_folder_id
}
- return self.post('/content/folders', params=content, version='v2')
+ return self.post('/content/folders', headers=headers, params=content, version='v2')
def get_personal_folder(self):
return self.get('/content/folders/personal', version='v2')
- def get_folder_by_id(self, folder_id):
- response = self.get('/content/folders/%s' % folder_id, version='v2')
+ def get_folder_by_id(self, folder_id, is_admin=False):
+ headers = {'isAdminMode': 'true'} if is_admin else {}
+ response = self.get('/content/folders/%s' % folder_id, version='v2', headers=headers)
return json.loads(response.text)
- def update_folder_by_id(self, folder_id, content):
- response = self.put('/content/folders/%s' % folder_id, version='v2', params=content)
+ def update_folder_by_id(self, folder_id, content, is_admin=False):
+ headers = {'isAdminMode': 'true'} if is_admin else {}
+ response = self.put('/content/folders/%s' % folder_id, version='v2', headers=headers, params=content)
return json.loads(response.text)
- def copy_folder(self, folder_id, parent_folder_id):
- return self.post('/content/%s/copy?destinationFolder=%s' % (folder_id, parent_folder_id), params={},
- version='v2')
+ def copy_folder(self, folder_id, parent_folder_id, is_admin=False):
+ headers = {'isAdminMode': 'true'} if is_admin else {}
+ return self.post('/content/%s/copy?destinationFolder=%s' % (folder_id, parent_folder_id), headers=headers,
+ params={}, version='v2')
- def import_content(self, folder_id, content, is_overwrite="false"):
- return self.post('/content/folders/%s/import?overwrite=%s' % (folder_id, is_overwrite), params=content,
- version='v2')
+ def import_content(self, folder_id, content, is_overwrite="false", is_admin=False):
+ headers = {'isAdminMode': 'true'} if is_admin else {}
+ return self.post('/content/folders/%s/import?overwrite=%s' % (folder_id, is_overwrite), headers=headers,
+ params=content, version='v2')
- def check_import_status(self, folder_id, job_id):
- return self.get('/content/folders/%s/import/%s/status' % (folder_id, job_id), version='v2')
+ def check_import_status(self, folder_id, job_id, is_admin=False):
+ headers = {'isAdminMode': 'true'} if is_admin else {}
+ return self.get('/content/folders/%s/import/%s/status' % (folder_id, job_id), version='v2', headers=headers)
def check_copy_status(self, folder_id, job_id):
return self.get('/content/%s/copy/%s/status' % (folder_id, job_id), version='v2')
- def install_app(self, app_id, content):
- return self.post('/apps/%s/install' % (app_id), params=content)
+ def install_app(self, app_id, content, is_admin=False):
+ headers = {'isAdminMode': 'true'} if is_admin else {}
+ return self.post('/apps/%s/install' % (app_id), headers=headers, params=content)
def check_app_install_status(self, job_id):
return self.get('/apps/install/%s/status' % job_id)
@@ -306,6 +314,10 @@ def import_monitors(self, folder_id, content):
response = self.post('/monitors/%s/import' % folder_id, params=content)
return json.loads(response.text)
+ def set_monitors_permissions(self, content):
+ response = self.put('/monitors/permissions/set', params=content)
+ return json.loads(response.text)
+
def export_monitors(self, folder_id):
response = self.get('/monitors/%s/export' % folder_id)
return json.loads(response.text)
diff --git a/sumologic-app-utils/src/sumoresource.py b/sumologic-app-utils/src/sumoresource.py
index 7b6961b..059c7fa 100644
--- a/sumologic-app-utils/src/sumoresource.py
+++ b/sumologic-app-utils/src/sumoresource.py
@@ -2,16 +2,19 @@
import re
import tempfile
import time
+import csv
from abc import abstractmethod
from datetime import datetime
from random import uniform
+
import requests
import six
+import jsonschema
from resourcefactory import AutoRegisterResource
from sumologic import SumoLogic
from awsresource import AWSResourcesProvider
-
+from constants import *
@six.add_metaclass(AutoRegisterResource)
class SumoResource(object):
@@ -41,7 +44,7 @@ def extract_params(self, event):
def api_endpoint(self):
if self.deployment == "us1":
return "https://api.sumologic.com/api"
- elif self.deployment in ["ca", "au", "de", "eu", "jp", "us2", "fed", "in"]:
+ elif self.deployment in ["ca", "au", "de", "eu", "jp", "us2", "fed", "kr", "ch"]:
return "https://api.%s.sumologic.com/api" % self.deployment
else:
return 'https://%s-api.sumologic.net/api' % self.deployment
@@ -108,7 +111,8 @@ def create(self, collector_type, collector_name, source_category=None, descripti
collector_id = json.loads(resp.text)['collector']['id']
print("created collector %s" % collector_id)
except Exception as e:
- if hasattr(e, 'response') and "code" in e.response.json() and e.response.json()["code"] == 'collectors.validation.name.duplicate':
+ if hasattr(e, 'response') and "code" in e.response.json() and e.response.json()[
+ "code"] == 'collectors.validation.name.duplicate':
collector = self._get_collector_by_name(collector_name, collector_type.lower())
collector_id = collector['id']
print("fetched existing collector %s" % collector_id)
@@ -267,7 +271,8 @@ def build_common_source_params(self, props, source_json=None):
})
# timestamp processing
if props.get("DateFormat"):
- source_json["defaultDateFormats"] = [{"format": props.get("DateFormat"), "locator": props.get("DateLocatorRegex")}]
+ source_json["defaultDateFormats"] = [
+ {"format": props.get("DateFormat"), "locator": props.get("DateLocatorRegex")}]
# processing rules
if 'filters' in props and isinstance(props['filters'], list):
@@ -316,6 +321,36 @@ def build_source_params(self, props, source_json=None):
})
return source_json
+ @staticmethod
+ def _prepare_aws_filter_tags(props):
+ filters = []
+
+ namespaces = props.get("Namespaces", [])
+ namespaces = [namespace for namespace in namespaces if namespace.strip().startswith('AWS/')]
+ aws_tag_filters = props.get("AWSTagFilters", {})
+ if aws_tag_filters:
+ # Convert the string to JSON (Python dictionary)
+ try:
+ aws_tag_filters = json.loads(aws_tag_filters)
+ jsonschema.validate(instance=aws_tag_filters, schema=AWS_TAG_FILTERS_SCHEMA)
+ print("Converted AWS tag filters JSON:", aws_tag_filters)
+ except json.JSONDecodeError as e:
+ print("Invalid AWS tag filters JSON:", e)
+ aws_tag_filters = {}
+ except jsonschema.exceptions.ValidationError as e:
+ print(f"JSON validation error: {e.message}")
+ aws_tag_filters = {}
+ else:
+ aws_tag_filters = {}
+ for key, value in aws_tag_filters.items():
+ if key in namespaces or key.lower() == "all":
+ filters.append({
+ "type": "TagFilters",
+ "namespace": key,
+ "tags": value["tags"]
+ })
+ return filters
+
def _get_path(self, props):
source_type = props.get("SourceType")
@@ -335,6 +370,9 @@ def _get_path(self, props):
path["limitToRegions"] = regions
if "Namespaces" in props:
path["limitToNamespaces"] = props.get("Namespaces")
+ aws_filter_tag = self._prepare_aws_filter_tags(props)
+ if aws_filter_tag:
+ path["tagFilters"] = aws_filter_tag
if source_type == "AwsCloudWatch":
path["type"] = "CloudWatchPath"
else:
@@ -353,7 +391,8 @@ def create(self, collector_id, source_name, props, *args, **kwargs):
print("created source %s" % source_id)
except Exception as e:
# Todo 100 sources in a collector is good. Same error code for duplicates in case of Collector and source.
- if hasattr(e, 'response') and "code" in e.response.json() and e.response.json()["code"] == 'collectors.validation.name.duplicate':
+ if hasattr(e, 'response') and "code" in e.response.json() and e.response.json()[
+ "code"] == 'collectors.validation.name.duplicate':
for source in self.sumologic_cli.sources(collector_id, limit=300):
if source["name"] == source_name:
source_id = source["id"]
@@ -385,36 +424,82 @@ def delete(self, collector_id, source_id, remove_on_delete_stack, props, *args,
print("skipping source deletion")
-class HTTPSource(SumoResource):
- # Todo refactor this to use basesource class
+class HTTPSource(BaseSource):
- def create(self, collector_id, source_name, source_category, fields, message_per_request,
- date_format=None, date_locator="\"timestamp\": (.*),", *args, **kwargs):
+ def build_source_params(self, props, source_json=None):
- endpoint = source_id = None
- params = {
- "sourceType": "HTTP",
- "name": source_name,
- "messagePerRequest": message_per_request,
- "multilineProcessingEnabled": False if message_per_request else True,
- "category": source_category
- }
- if date_format:
- params["defaultDateFormats"] = [{"format": date_format, "locator": date_locator}]
+ source_json = self.build_common_source_params(props, source_json)
- # Fields condition
- if fields:
- params['fields'] = fields
+ source_json["messagePerRequest"] = props.get("MessagePerRequest") == 'true'
+ source_json["multilineProcessingEnabled"] = False if source_json["messagePerRequest"] else True
+ source_json["sourceType"] = "HTTP"
+
+ if props.get("SourceType"):
+ source_json["contentType"] = props.get("SourceType")
+ if "RoleArn" in props and props.get("RoleArn"):
+ source_json.update({
+ "thirdPartyRef": {
+ "resources": [{
+ "serviceType": props.get("SourceType"),
+ "path": self._get_path(props),
+ "authentication": {
+ "type": "AWSRoleBasedAuthentication",
+ "roleARN": props.get("RoleArn")
+ }
+ }]
+ }
+ })
+ return source_json
+
+ @staticmethod
+ def _prepare_aws_filter_tags(props):
+ filters = []
+ aws_tag_filters = props.get("AWSTagFilters", {})
+ if aws_tag_filters:
+ # Convert the string to JSON (Python dictionary)
+ try:
+ aws_tag_filters = json.loads(aws_tag_filters)
+ jsonschema.validate(instance=aws_tag_filters, schema=AWS_TAG_FILTERS_SCHEMA)
+ print("Converted AWS tag filters JSON:", aws_tag_filters)
+ except json.JSONDecodeError as e:
+ print("Invalid AWS tag filters JSON:", e)
+ aws_tag_filters = {}
+ except jsonschema.exceptions.ValidationError as e:
+ print(f"JSON validation error: {e.message}")
+ aws_tag_filters = {}
+ else:
+ aws_tag_filters = {}
+ for key, value in aws_tag_filters.items():
+ if key.strip().startswith('AWS/') or key.lower() == "all":
+ filters.append({
+ "type": "TagFilters",
+ "namespace": key,
+ "tags": value["tags"]
+ })
+ return filters
+ def _get_path(self, props):
+ path = {
+ "type": props.get("SourceType") + "Path",
+ }
+ aws_filter_tag = self._prepare_aws_filter_tags(props)
+ if aws_filter_tag:
+ path["tagFilters"] = aws_filter_tag
+ return path
+
+ def create(self, collector_id, source_name, props, *args, **kwargs):
+ endpoint = source_id = None
+ source_json = {"source": self.build_source_params(props)}
try:
- resp = self.sumologic_cli.create_source(collector_id, {"source": params})
+ resp = self.sumologic_cli.create_source(collector_id, source_json)
data = resp.json()['source']
source_id = data["id"]
endpoint = data["url"]
print("created source %s" % source_id)
except Exception as e:
# Todo 100 sources in a collector is good
- if hasattr(e, 'response') and "code" in e.response.json() and e.response.json()["code"] == 'collectors.validation.name.duplicate':
+ if hasattr(e, 'response') and "code" in e.response.json() and e.response.json()[
+ "code"] == 'collectors.validation.name.duplicate':
for source in self.sumologic_cli.sources(collector_id, limit=300):
if source["name"] == source_name:
source_id = source["id"]
@@ -424,18 +509,10 @@ def create(self, collector_id, source_name, source_category, fields, message_per
raise
return {"SUMO_ENDPOINT": endpoint}, source_id
- def update(self, collector_id, source_id, source_name, source_category, fields, date_format=None, date_locator=None, *args,
+ def update(self, collector_id, source_id, source_name, props, *args,
**kwargs):
sv, etag = self.sumologic_cli.source(collector_id, source_id)
- sv['source']['category'] = source_category
- sv['source']['name'] = source_name
- if date_format:
- sv['source']["defaultDateFormats"] = [{"format": date_format, "locator": date_locator}]
- # Fields condition
- existing_fields = sv['source']['fields']
- if fields:
- existing_fields.update(fields)
- sv['source']['fields'] = existing_fields
+ sv['source'] = self.build_source_params(props, sv['source'])
resp = self.sumologic_cli.update_source(collector_id, sv, etag)
data = resp.json()['source']
@@ -455,19 +532,11 @@ def extract_params(self, event):
if event.get('PhysicalResourceId'):
_, source_id = event['PhysicalResourceId'].split("/")
- fields = {}
- if 'Fields' in props:
- fields = props.get("Fields")
-
return {
"collector_id": props.get("CollectorId"),
"source_name": props.get("SourceName"),
- "source_category": props.get("SourceCategory"),
- "date_format": props.get("DateFormat"),
- "date_locator": props.get("DateLocatorRegex"),
- "message_per_request": props.get("MessagePerRequest") == 'true',
"source_id": source_id,
- "fields": fields
+ "props": props,
}
@@ -489,29 +558,36 @@ def _replace_source_category(self, appjson_filepath, sourceDict):
return appjson
- def _add_time_suffix(self, appjson):
+ @staticmethod
+ def _add_time_suffix(appjson):
date_format = "%Y-%m-%d %H:%M:%S"
appjson['name'] = appjson['name'] + "-" + datetime.utcnow().strftime(date_format)
return appjson
- def _get_app_folder(self, appdata, parent_id):
+ def _get_app_folder(self, appdata, parent_id, is_admin=False):
folder_id = None
try:
- response = self.sumologic_cli.create_folder(appdata["name"], appdata["description"][:255], parent_id)
+ response = self.sumologic_cli.create_folder(appdata["name"], appdata["description"][:255], parent_id,
+ is_admin)
folder_id = response.json()["id"]
except Exception as e:
if hasattr(e, 'response') and "errors" in e.response.json() and e.response.json()["errors"]:
errors = e.response.json()["errors"]
for error in errors:
if error.get('code') == 'content:duplicate_content':
- folder_details = self.sumologic_cli.get_folder_by_id(parent_id)
- if "children" in folder_details:
- for children in folder_details["children"]:
- if "name" in children and children["name"] == appdata["name"]:
- return children["id"]
+ folder_details = self.sumologic_cli.get_folder_by_id(parent_id, is_admin)
+ return self.get_child_folder_id(folder_details, appdata["name"])
raise
return folder_id
+
+ @staticmethod
+ def get_child_folder_id(data, folder_name):
+ if "children" in data:
+ for folder in data["children"]:
+ if "name" in folder and folder["name"] == folder_name:
+ return folder["id"]
+ return None
+
def _get_app_content(self, appname, source_params, s3url=None):
# Based on S3 URL provided download the data.
if not s3url:
@@ -531,13 +607,13 @@ def _get_app_content(self, appname, source_params, s3url=None):
return appjson
- def _wait_for_folder_creation(self, folder_id, job_id):
+ def _wait_for_folder_creation(self, folder_id, job_id, is_admin):
print("waiting for folder creation folder_id %s job_id %s" % (folder_id, job_id))
waiting = True
while waiting:
- response = self.sumologic_cli.check_import_status(folder_id, job_id)
+ response = self.sumologic_cli.check_import_status(folder_id, job_id, is_admin)
waiting = response.json()['status'] == "InProgress"
- time.sleep(5)
+ time.sleep(2)
print("job status: %s" % response.text)
@@ -547,7 +623,7 @@ def _wait_for_folder_copy(self, folder_id, job_id):
while waiting:
response = self.sumologic_cli.check_copy_status(folder_id, job_id)
waiting = response.json()['status'] == "InProgress"
- time.sleep(5)
+ time.sleep(2)
print("job status: %s" % response.text)
matched = re.search('id:\s*(.*?)\"', response.text)
@@ -562,16 +638,16 @@ def _wait_for_app_install(self, job_id):
while waiting:
response = self.sumologic_cli.check_app_install_status(job_id)
waiting = response.json()['status'] == "InProgress"
- time.sleep(5)
+ time.sleep(2)
print("job status: %s" % response.text)
return response
- def _create_backup_folder(self, new_app_folder_id, old_app_folder_id):
- new_folder_details = self.sumologic_cli.get_folder_by_id(new_app_folder_id)
+ def _create_backup_folder(self, new_app_folder_id, old_app_folder_id, is_admin):
+ new_folder_details = self.sumologic_cli.get_folder_by_id(new_app_folder_id, is_admin)
parent_folder_id = new_folder_details["parentId"]
- old_folder_details = self.sumologic_cli.get_folder_by_id(old_app_folder_id)
- old_parent_folder_details = self.sumologic_cli.get_folder_by_id(old_folder_details["parentId"])
+ old_folder_details = self.sumologic_cli.get_folder_by_id(old_app_folder_id, is_admin)
+ old_parent_folder_details = self.sumologic_cli.get_folder_by_id(old_folder_details["parentId"], is_admin)
if old_parent_folder_details.get("parentId") == "0000000000000000":
back_up = "Back Up Old App"
@@ -580,57 +656,131 @@ def _create_backup_folder(self, new_app_folder_id, old_app_folder_id):
backup_folder_id = self._get_app_folder({"name": back_up,
"description": "The folder contains back up of all the apps that are updated using CloudFormation template."},
- parent_folder_id)
+ parent_folder_id, is_admin)
return backup_folder_id
- def _create_or_fetch_apps_parent_folder(self, folder_prefix):
- response = self.sumologic_cli.get_personal_folder()
+ def get_admin_recommended_id(self, is_admin):
+ """ Sync call to get the Admin Recommended folder Id """
+ return self.get_admin_recommended(is_admin).json()["id"]
+
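+ # The v2 adminRecommended endpoint is asynchronous: the first call returns a job id which is
+ # polled at .../{job_id}/status until it leaves "InProgress"; the folder content is then read
+ # from .../{job_id}/result.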
+ def get_admin_recommended(self, is_admin):
+ """ Sync call to get the Admin Recommended folder content """
+ headers = {"isAdminMode": "true"} if is_admin else {}
+
+ job_response = self.sumologic_cli.get("/content/folders/adminRecommended", version="v2",
+ headers=headers)
+ job_id = json.loads(job_response.text)["id"]
+
+ print("Got Admin Recommended job " + job_id + ", now checking for status")
+ while True:
+ status_response = self.sumologic_cli.get(f"/content/folders/adminRecommended/{job_id}/status",
+ version="v2")
+ data = json.loads(status_response.text)
+ if data["status"] == "InProgress":
+ print('Admin Recommended job in progress')
+ time.sleep(1)
+ else:
+ if data["status"] == "Failed":
+ raise Exception("Unable to fetch the Admin Recommended folder")
+ else:
+ status_response = self.sumologic_cli.get(f"/content/folders/adminRecommended/{job_id}/result",
+ version="v2")
+ return status_response
+
+ def share_content_with_org(self, is_share, content_id, org_id, is_admin):
+ """ Share or Revoke a given content item """
+ action = "remove"
+ if is_share:
+ action = "add"
+ payload = {"contentPermissionAssignments": [
+ {
+ "permissionName": "View",
+ "sourceType": "org",
+ "sourceId": org_id,
+ "contentId": content_id
+ }],
+ "notifyRecipients": False,
+ "notificationMessage": ""
+ }
+ print(f"Updating action: {action} on payload: {json.dumps(payload)}")
+ headers = {'isAdminMode': 'true'} if is_admin else {}
+ response = self.sumologic_cli.put(f"/content/{content_id}/permissions/{action}", params=payload,
+ headers=headers, version="v2")
+ if response.status_code == 200:
+ return response
+ else:
+ raise Exception(f"Unable to {action} permissions for {content_id} in org: {org_id}")
+
+ def share_app_by_id(self, is_share, app_folder_id, org_id, is_admin):
+ """ Share (or revoke sharing of) an app folder, identified by its Id, with the whole org """
+ response = self.share_content_with_org(is_share, app_folder_id, org_id, is_admin)
+ if response.status_code == 200:
+ print('Updated sharing of ' + app_folder_id + ' in org ' + org_id)
+ else:
+ raise Exception(
+ f"Folder Sharing Failed for folder id: {app_folder_id} in {org_id} response: {response.text}")
+
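+ # Resolves the parent folder for app installs: the Admin Recommended folder when location is
+ # 'admin' (API calls then run with isAdminMode), otherwise the caller's personal folder. The
+ # dated apps folder created underneath is shared with (or unshared from) the org via org_id.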
+ def _create_or_fetch_apps_parent_folder(self, folder_prefix, org_id, is_share=False, location=None):
+ is_admin = False
+ if location == 'admin':
+ parent_content = self.get_admin_recommended(is_admin=True).json()
+ parent_folder_id = parent_content["id"]
+ is_admin = True
+ else:
+ parent_content = self.sumologic_cli.get_personal_folder().json()
+ parent_folder_id = parent_content['id']
folder_name = folder_prefix + str(datetime.now().strftime(" %d-%b-%Y"))
description = "This folder contains all the apps created as a part of Sumo Logic Solutions."
try:
- folder = self.sumologic_cli.create_folder(folder_name, description, response.json()['id'])
+ folder = self.sumologic_cli.create_folder(folder_name, description, parent_folder_id, is_admin)
+ self.share_app_by_id(is_share, folder.json()["id"], org_id, is_admin)
return folder.json()["id"]
except Exception as e:
if hasattr(e, 'response') and "errors" in e.response.json() and e.response.json()["errors"]:
errors = e.response.json()["errors"]
for error in errors:
if error.get('code') == 'content:duplicate_content':
- response = self.sumologic_cli.get_personal_folder()
- if "children" in response.json():
- for children in response.json()["children"]:
- if "name" in children and children["name"] == folder_name:
- return children["id"]
+ print(f"The folder already exists. Updating its sharing permissions (is_share={is_share}).")
+ child_folder_id = self.get_child_folder_id(parent_content, folder_name)
+ self.share_app_by_id(is_share, child_folder_id, org_id, is_admin)
+ return child_folder_id
raise
- def create_by_import_api(self, appname, source_params, folder_name, s3url, *args, **kwargs):
+ def create_by_import_api(self, appname, source_params, folder_name, s3url, org_id, location, is_share, *args, **kwargs):
# Add retry if folder sync fails
if appname in self.ENTERPRISE_ONLY_APPS and not self.is_enterprise_or_trial_account():
raise Exception("%s is available to Enterprise or Trial Account Type only." % appname)
content = self._get_app_content(appname, source_params, s3url)
-
+ is_admin = False
if folder_name:
- folder_id = self._create_or_fetch_apps_parent_folder(folder_name)
+ folder_id = self._create_or_fetch_apps_parent_folder(folder_name, org_id, is_share, location)
else:
response = self.sumologic_cli.get_personal_folder()
folder_id = response.json()['id']
- app_folder_id = self._get_app_folder(content, folder_id)
- time.sleep(5)
- response = self.sumologic_cli.import_content(folder_id, content, is_overwrite="true")
+ if location == "admin":
+ is_admin = True
+ app_folder_id = self._get_app_folder(content, folder_id, True)
+ time.sleep(3)
+ response = self.sumologic_cli.import_content(folder_id, content, is_overwrite="true", is_admin=is_admin)
+ else:
+ app_folder_id = self._get_app_folder(content, folder_id)
+ time.sleep(3)
+ response = self.sumologic_cli.import_content(folder_id, content, is_overwrite="true")
job_id = response.json()["id"]
- print("installed app %s: appFolderId: %s personalFolderId: %s jobId: %s" % (
+ print("Imported app %s: appFolderId: %s FolderId: %s jobId: %s" % (
appname, app_folder_id, folder_id, job_id))
- self._wait_for_folder_creation(folder_id, job_id)
+ self._wait_for_folder_creation(folder_id, job_id, is_admin)
return {"APP_FOLDER_NAME": content["name"]}, app_folder_id
- def create_by_install_api(self, appid, appname, source_params, folder_name, *args, **kwargs):
+ def create_by_install_api(self, appid, appname, source_params, folder_name, org_id, location, is_share, *args,
+ **kwargs):
if appname in self.ENTERPRISE_ONLY_APPS and not self.is_enterprise_or_trial_account():
raise Exception("%s is available to Enterprise or Trial Account Type only." % appname)
- folder_id = None
-
if folder_name:
- folder_id = self._create_or_fetch_apps_parent_folder(folder_name)
+ folder_id = self._create_or_fetch_apps_parent_folder(folder_name, org_id, is_share, location)
else:
response = self.sumologic_cli.get_personal_folder()
folder_id = response.json()['id']
@@ -638,12 +788,15 @@ def create_by_install_api(self, appid, appname, source_params, folder_name, *arg
content = {'name': appname + datetime.now().strftime(" %d-%b-%Y %H:%M:%S"), 'description': appname,
'dataSourceValues': source_params, 'destinationFolderId': folder_id}
- response = self.sumologic_cli.install_app(appid, content)
+ if location == "admin":
+ response = self.sumologic_cli.install_app(appid, content, True)
+ else:
+ response = self.sumologic_cli.install_app(appid, content)
job_id = response.json()["id"]
response = self._wait_for_app_install(job_id)
json_resp = json.loads(response.content)
- if (json_resp['status'] == 'Success'):
+ if json_resp['status'] == 'Success':
app_folder_id = json_resp['statusMessage'].split(":")[1]
print("installed app %s: appFolderId: %s parent_folder_id: %s jobId: %s" % (
appname, app_folder_id, folder_id, job_id))
@@ -652,37 +805,60 @@ def create_by_install_api(self, appid, appname, source_params, folder_name, *arg
print("%s installation failed." % appname)
raise Exception(response.text)
- def create(self, appname, source_params, appid=None, folder_name=None, s3url=None, *args, **kwargs):
+ def create(self, appname, source_params, org_id, is_share=True, location=None, appid=None, folder_name=None, s3url=None,
+ *args, **kwargs):
if appid:
- return self.create_by_install_api(appid, appname, source_params, folder_name, *args, **kwargs)
+ return self.create_by_install_api(appid, appname, source_params, folder_name, org_id, location, is_share, *args,
+ **kwargs)
else:
- return self.create_by_import_api(appname, source_params, folder_name, s3url, *args, **kwargs)
+ return self.create_by_import_api(appname, source_params, folder_name, s3url, org_id, location, is_share, *args,
+ **kwargs)
- def update(self, app_folder_id, appname, source_params, appid=None, folder_name=None, retain_old_app=False,
+ def update(self, app_folder_id, appname, source_params, org_id, is_share=True, location=None, appid=None,
+ folder_name=None, retain_old_app=False,
s3url=None, *args, **kwargs):
+
# Delete is called by CF itself on Old Resource if we create a new resource. So, no need to delete the resource here.
# self.delete(app_folder_id, remove_on_delete_stack=True)
- data, new_app_folder_id = self.create(appname, source_params, appid, folder_name, s3url)
+ is_admin = False
+ if location == "admin":
+ is_admin = True
+ data, new_app_folder_id = self.create(appname=appname, source_params=source_params, appid=appid,
+ folder_name=folder_name, s3url=s3url, org_id=org_id, is_share=is_share,
+ location=location)
print("updated app appFolderId: %s " % new_app_folder_id)
if retain_old_app:
- backup_folder_id = self._create_backup_folder(new_app_folder_id, app_folder_id)
- # Starting Folder Copy
- response = self.sumologic_cli.copy_folder(app_folder_id, backup_folder_id)
- job_id = response.json()["id"]
- print("Copy Completed parentFolderId: %s jobId: %s" % (backup_folder_id, job_id))
- copied_folder_id = self._wait_for_folder_copy(app_folder_id, job_id)
- # Updating copied folder name with suffix BackUp.
- copied_folder_details = self.sumologic_cli.get_folder_by_id(copied_folder_id)
- copied_folder_details = {"name": copied_folder_details["name"].replace("(Copy)", "- BackUp_" + datetime.now().strftime("%H:%M:%S")),
- "description": copied_folder_details["description"][:255]}
- self.sumologic_cli.update_folder_by_id(copied_folder_id, copied_folder_details)
- print("Back Up done for the APP: %s." % backup_folder_id)
+ try:
+ backup_folder_id = self._create_backup_folder(new_app_folder_id, app_folder_id, is_admin)
+ print("backup folder created")
+ # Starting Folder Copy
+ response = self.sumologic_cli.copy_folder(app_folder_id, backup_folder_id, is_admin)
+ job_id = response.json()["id"]
+ print("Copy Completed parentFolderId: %s jobId: %s" % (backup_folder_id, job_id))
+ copied_folder_id = self._wait_for_folder_copy(app_folder_id, job_id)
+ # Updating copied folder name with suffix BackUp.
+ copied_folder_details = self.sumologic_cli.get_folder_by_id(copied_folder_id, is_admin)
+ copied_folder_details = {"name": copied_folder_details["name"].replace("(Copy)",
+ "- BackUp_" + datetime.now().strftime(
+ "%H:%M:%S")),
+ "description": copied_folder_details["description"][:255]}
+ self.sumologic_cli.update_folder_by_id(copied_folder_id, copied_folder_details, is_admin)
+ print("Back Up done for the APP: %s." % backup_folder_id)
+ except Exception as e:
+ print("App - Exception while taking backup of App folder ID %s, error: %s " % (app_folder_id, e))
+
return data, new_app_folder_id
- def delete(self, app_folder_id, remove_on_delete_stack, *args, **kwargs):
+ def delete(self, app_folder_id, remove_on_delete_stack, location=None, *args, **kwargs):
+ is_admin = False
+ if location == "admin":
+ is_admin = True
if remove_on_delete_stack:
- response = self.sumologic_cli.delete_folder(app_folder_id)
- print("deleting app folder %s : %s" % (app_folder_id, response.text))
+ try:
+ response = self.sumologic_cli.delete_folder(app_folder_id, is_admin)
+ print("deleting app folder %s : %s" % (app_folder_id, response.text))
+ except Exception as e:
+ print("App - Exception while deleting the App folder ID %s, error: %s " % (app_folder_id, e))
else:
print("skipping app folder deletion")
@@ -698,7 +874,10 @@ def extract_params(self, event):
"folder_name": props.get("FolderName"),
"retain_old_app": props.get("RetainOldAppOnUpdate") == 'true',
"app_folder_id": app_folder_id,
- "s3url": props.get("AppJsonS3Url")
+ "s3url": props.get("AppJsonS3Url"),
+ "location": 'admin' if props.get("location") == 'Admin Recommended Folder' else 'personal',
+ "is_share": props.get("share") == 'True',
+ "org_id": props.get("orgid")
}
@@ -826,7 +1005,9 @@ def delete(self, job_name, metric_rule_name, remove_on_delete_stack, *args, **kw
if remove_on_delete_stack:
try:
response = self.sumologic_cli.delete_metric_rule(metric_rule_name)
- print("METRIC RULES - Completed the Metric Rule deletion for Name %s, response - %s" % (metric_rule_name, response.text))
+ print(
+ "METRIC RULES - Completed the Metric Rule deletion for Name %s, response - %s" % (metric_rule_name,
+ response.text))
except Exception as e:
print("AWS EXPLORER - Exception while deleting the Metric Rules %s," % e)
else:
@@ -863,6 +1044,7 @@ class SumoLogicUpdateFields(SumoResource):
Fields can also be added to new Sources using AWSSource, HTTPSources classes.
Getting collector name, as Calling custom collector resource can update the collector name if stack is updated with different collector name.
"""
+
def add_fields_to_collector(self, collector_id, source_id, fields):
if collector_id and source_id:
sv, etag = self.sumologic_cli.source(collector_id, source_id)
@@ -1267,7 +1449,62 @@ def extract_params(self, event):
return props
+class AccountAlias(SumoResource):
+
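+ # Resolves a friendly alias for the AWS account. Precedence: a matching row in the optional
+ # mapping CSV from S3 (rows of "<account-id>,<alias>", alias limited to 30 chars of [a-z0-9]),
+ # then the AccountAlias property, then the raw account id. A hypothetical CSV row:
+ # "123456789012,prodbilling".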
+ def get_account_alias(self, account_id, accountaliasmappings3url, accountalias):
+ if accountaliasmappings3url:
+ try:
+ with requests.get(accountaliasmappings3url, stream=True) as r:
+ with tempfile.NamedTemporaryFile() as fp:
+ for chunk in r.iter_content(chunk_size=8192):
+ if chunk:
+ fp.write(chunk)
+ fp.flush()
+ fp.seek(0)
+ with open(fp.name, 'r') as csv_file:
+ csv_reader = csv.reader(csv_file, delimiter=',')
+ for row in csv_reader:
+ if row[0] == account_id and row[1] is not None:
+ if len(str(row[1])) <= 30 and re.search(re.compile(r'[a-z0-9]+$'), row[1]):
+ return {"ACCOUNT_ALIAS": row[1]}, row[1]
+ except Exception as e:
+ print("Exception while trying to read the csv file")
+ print(e)
+ if accountalias:
+ return {"ACCOUNT_ALIAS": accountalias}, accountalias
+ else:
+ return {"ACCOUNT_ALIAS": account_id}, account_id
+ else:
+ if accountalias:
+ return {"ACCOUNT_ALIAS": accountalias}, accountalias
+ else:
+ return {"ACCOUNT_ALIAS": account_id}, account_id
+
+ return {"ACCOUNT_ALIAS": account_id}, account_id
+
+ def create(self, account_id, accountaliasmappings3url, accountalias, *args, **kwargs):
+ return self.get_account_alias(account_id, accountaliasmappings3url, accountalias)
+
+ def update(self, account_id, accountaliasmappings3url, accountalias, *args, **kwargs):
+ return self.get_account_alias(account_id, accountaliasmappings3url, accountalias)
+
+ def delete(self, account_id, accountaliasmappings3url, accountalias, *args, **kwargs):
+ print("In Delete method for Account Alias")
+
+ def extract_params(self, event):
+ props = event.get("ResourceProperties")
+ return {
+ "account_id": props.get("AccountID"),
+ "accountaliasmappings3url": props.get("AccountAliasMappingS3Url"),
+ "accountalias": props.get("AccountAlias")
+ }
+
+
class AlertsMonitor(SumoResource):
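+ # Fields provisioned by the AWS Observability solution; the monitor import below waits until
+ # all of them exist in the org before running.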
+ awso_fields = ["account", "accountid", "apiid", "apiname", "cacheclusterid", "clustername", "dbclusteridentifier",
+ "dbidentifier", "dbinstanceidentifier", "functionname", "instanceid", "loadbalancer",
+ "loadbalancername", "namespace", "networkloadbalancer", "region", "tablename", "topicname",
+ "queuename"]
def _replace_variables(self, appjson_filepath, variables):
with open(appjson_filepath, 'r') as old_file:
@@ -1295,27 +1532,60 @@ def _get_root_folder_id(self):
response = self.sumologic_cli.get_root_folder()
return response["id"]
- def import_monitor(self, folder_name, monitors3url, variables, suffix_date_time):
+ def _fields_present(self):
+ fields_responses = self.sumologic_cli.get_all_fields()
+ field_names = [fields_response['fieldName'] for fields_response in fields_responses]
+ intersection_set = set(field_names).intersection(set(self.awso_fields))
+ intersection_set = list(intersection_set)
+ intersection_set.sort()
+ self.awso_fields.sort()
+ return intersection_set == self.awso_fields
+
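+ # import_monitor retries (every 10 seconds, up to retry_counter attempts) until the AWS
+ # Observability fields above are present and the monitor import succeeds; otherwise it gives up
+ # with a "Monitor Import Timed out" result.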
+ def import_monitor(self, folder_name, org_id, monitors3url, variables, suffix_date_time, retry_counter=30):
date_format = "%d-%b-%Y %H:%M:%S"
- root_folder_id = self._get_root_folder_id()
- content = self._get_content_from_s3(monitors3url, variables)
- content["name"] = folder_name + " " + datetime.utcnow().strftime(date_format) if suffix_date_time \
- else folder_name
- response = self.sumologic_cli.import_monitors(root_folder_id, content)
- import_id = response["id"]
- print("ALERTS MONITORS - creation successful with ID %s and Name %s." % (import_id, folder_name))
+ if retry_counter <= 0:
+ return {"ALERTS MONITORS": "Monitor Import Timed out"}, None
+ if self._fields_present():
+ try:
+ root_folder_id = self._get_root_folder_id()
+ content = self._get_content_from_s3(monitors3url, variables)
+ content["name"] = folder_name + " " + datetime.utcnow().strftime(date_format) if suffix_date_time \
+ else folder_name
+ response = self.sumologic_cli.import_monitors(root_folder_id, content)
+ import_id = response["id"]
+ # TODO: Uncomment the following two lines once the FGP feature for monitors is live
+ # monitor_permission_payload = {"permissionStatementDefinitions": [{"permissions": ["Create","Read","Update","Delete","Manage"],"subjectType": "org","subjectId": org_id,"targetId": import_id}]}
+ # self.sumologic_cli.set_monitors_permissions(monitor_permission_payload)
+ # End of block to uncomment once the FGP feature for monitors is live
+ print("ALERTS MONITORS - creation successful with ID %s and Name %s." % (import_id, folder_name))
+ except Exception as e:
+ print("ALERTS MONITORS - import attempt failed, retrying: %s" % e)
+ time.sleep(10)
+ retry_counter -= 1
+ return self.import_monitor(folder_name, org_id, monitors3url, variables, suffix_date_time,
+ retry_counter=retry_counter)
+ else:
+ time.sleep(10)
+ retry_counter -= 1
+ return self.import_monitor(folder_name, org_id, monitors3url, variables, suffix_date_time,
+ retry_counter=retry_counter)
return {"ALERTS MONITORS": response["name"]}, import_id
- def create(self, folder_name, monitors3url, variables, suffix_date_time=False, *args, **kwargs):
- return self.import_monitor(folder_name, monitors3url, variables, suffix_date_time)
+ def create(self, folder_name, org_id, monitors3url, variables, suffix_date_time=False, *args, **kwargs):
+ return self.import_monitor(folder_name, org_id, monitors3url, variables, suffix_date_time)
- def update(self, folder_id, folder_name, monitors3url, variables, suffix_date_time=False, retain_old_alerts=False, *args, **kwargs):
- data, new_folder_id = self.create(folder_name, monitors3url, variables, suffix_date_time)
+ def update(self, folder_id, folder_name, org_id, monitors3url, variables, suffix_date_time=False,
+ retain_old_alerts=False, *args, **kwargs):
+ data, new_folder_id = self.create(folder_name, org_id, monitors3url, variables, suffix_date_time)
if retain_old_alerts:
# Retaining old folder in the new folder as backup.
- old_folder = self.sumologic_cli.export_monitors(folder_id)
- old_folder["name"] = "Back Up " + old_folder["name"]
- self.sumologic_cli.import_monitors(new_folder_id, old_folder)
+ try:
+ old_folder = self.sumologic_cli.export_monitors(folder_id)
+ old_folder["name"] = "Back Up " + old_folder["name"]
+ self.sumologic_cli.import_monitors(new_folder_id, old_folder)
+ except Exception as e:
+ print("Error while taking backup of Monitors folder")
+ print(e)
+
print("ALERTS MONITORS - Update successful with ID %s." % new_folder_id)
return data, new_folder_id
@@ -1343,6 +1613,7 @@ def extract_params(self, event):
"suffix_date_time": props.get("SuffixDateTime") == 'true',
"retain_old_alerts": props.get("RetainOldAlerts") == 'true',
"folder_id": folder_id,
+ "org_id": props.get("orgid")
}
@@ -1350,18 +1621,19 @@ def extract_params(self, event):
props = {
"SumoAccessID": "",
"SumoAccessKey": "",
- "SumoDeployment": "us1",
+ "SumoDeployment": "",
}
- app_prefix = "CloudTrail"
+ app_prefix = "ALB"
# app_prefix = "GuardDuty"
collector_id = None
collector_type = "Hosted"
collector_name = "%sCollector" % app_prefix
source_name = "%sEvents" % app_prefix
source_category = "Labs/AWS/%s" % app_prefix
- # appname = "Global Intelligence for Amazon GuardDuty"
- appname = "Global Intelligence for AWS CloudTrail"
- appid = "570bdc0d-f824-4fcb-96b2-3230d4497180"
+ appname = "AWS Application LB"
+ appid = "ceb7fac5-1137-4a04-a5b8-2e49190be3d4"
+ # appid = "570bdc0d-f824-4fcb-96b2-3230d4497180"
+ s3url = ""
# appid = "ceb7fac5-1137-4a04-a5b8-2e49190be3d4"
# appid = None
# source_params = {
@@ -1374,14 +1646,19 @@ def extract_params(self, event):
}
# col = Collector(**params)
# src = HTTPSource(**params)
- app = App(props)
+ # app = App(props)
# create
# _, collector_id = col.create(collector_type, collector_name, source_category)
# _, source_id = src.create(collector_id, source_name, source_category)
+ # _, app_folder_id = app.create(appname=appname, source_params=source_params, folder_name="abc", appid=appid, org_id="0000000000BC5DF9", is_share=True, location='admin')  # install
+ # _, app_folder_id = app.update(app_folder_id='0000000001A70848', appname=appname, source_params=source_params, folder_name="abcd", s3url=s3url, org_id="0000000000BC5DF9", is_share=True, location='admin', retain_old_app=True)  # import
+ # app.delete(app_folder_id, True, location='admin')
- _, app_folder_id = app.create(appname, source_params, appid)
- app.delete(app_folder_id, True)
+ monitor = AlertsMonitor(props)
+ monitors3 = "https://sumologic-appdev-aws-sam-apps.s3.amazonaws.com/aws-observability-versions/v2.8.0/appjson/Alerts-App.json"
+ # _, app_folder_id = monitor.create('abc','0000000000BD3DDD',monitors3,"",retain_old_alerts=False)
+ # _, app_folder_id = monitor.update('000000000002796B','abc1','0000000000285A74',monitors3,"",retain_old_alerts=True)
# update
# _, new_collector_id = col.update(collector_id, collector_type, "%sCollectorNew" % app_prefix, "Labs/AWS/%sNew" % app_prefix, description="%s Collector" % app_prefix)
@@ -1399,4 +1676,3 @@ def extract_params(self, event):
# src.delete(collector_id, source_id, True)
# col.delete(collector_id, True)
# app.delete(new_app_folder_id, True)
-
diff --git a/sumologic-app-utils/sumo_app_utils.yaml b/sumologic-app-utils/sumo_app_utils.yaml
index c1dcf7b..0224415 100644
--- a/sumologic-app-utils/sumo_app_utils.yaml
+++ b/sumologic-app-utils/sumo_app_utils.yaml
@@ -17,8 +17,8 @@ Metadata:
- sumologic
- serverless
Name: sumologic-app-utils
- SemanticVersion: 2.0.6
- SourceCodeUrl: https://github.com/SumoLogic/sumologic-aws-lambda/tree/master/sumologic-app-utils
+ SemanticVersion: 2.0.21
+ SourceCodeUrl: https://github.com/SumoLogic/sumologic-aws-lambda/tree/main/sumologic-app-utils
SpdxLicenseId: Apache-2.0
ReadmeUrl: ./README.md
@@ -28,8 +28,8 @@ Resources:
Type: 'AWS::Serverless::Function'
Properties:
Handler: main.handler
- Runtime: python3.7
- CodeUri: s3://appdevstore/sumo_app_utils/v2.0.6/sumo_app_utils.zip
+ Runtime: python3.13
+ CodeUri: s3://appdevstore/sumo_app_utils/v2.0.21/sumo_app_utils.zip
MemorySize: 128
Timeout: 300
Policies:
@@ -54,4 +54,4 @@ Outputs:
Description: "SumoAppUtils Function Role ARN"
Value: !GetAtt SumoAppUtilsFunctionRole.Arn
Export:
- Name: !Sub "${AWS::StackName}-SumoAppUtilsFunctionRole"
\ No newline at end of file
+ Name: !Sub "${AWS::StackName}-SumoAppUtilsFunctionRole"
diff --git a/sumologic-app-utils/sumo_app_utils.zip b/sumologic-app-utils/sumo_app_utils.zip
index 953bee1..ee6fd5b 100644
Binary files a/sumologic-app-utils/sumo_app_utils.zip and b/sumologic-app-utils/sumo_app_utils.zip differ
diff --git a/deploy_function.py b/upload_artifacts.py
similarity index 68%
rename from deploy_function.py
rename to upload_artifacts.py
index 36d7beb..449b458 100644
--- a/deploy_function.py
+++ b/upload_artifacts.py
@@ -14,7 +14,7 @@
"ap-northeast-1",
"ca-central-1",
# "cn-north-1",
- # "ap-northeast-3", #giving errror
+ "ap-northeast-3",
"eu-central-1",
"eu-west-1",
"eu-west-2",
@@ -24,20 +24,48 @@
"ap-east-1",
"me-south-1",
"eu-south-1",
- "af-south-1"
+ "af-south-1",
+ "me-central-1",
+ "eu-central-2",
+ "ap-southeast-3"
]
-
-def get_bucket_name(bucket_prefix, region):
- if region in ("eu-north-1", "me-south-1", "ap-east-1", "af-south-1"):
- return '%s-%ss' % (bucket_prefix, region)
- return '%s-%s' % (bucket_prefix, region)
+region_map = {
+ "us-east-1" : "appdevzipfiles-us-east-1",
+ "us-east-2" : "appdevzipfiles-us-east-2",
+ "us-west-1" : "appdevzipfiles-us-west-1",
+ "us-west-2" : "appdevzipfiles-us-west-2",
+ "ap-south-1": "appdevzipfiles-ap-south-1",
+ "ap-northeast-2":"appdevzipfiles-ap-northeast-2",
+ "ap-southeast-1":"appdevzipfiles-ap-southeast-1",
+ "ap-southeast-2":"appdevzipfiles-ap-southeast-2",
+ "ap-northeast-1":"appdevzipfiles-ap-northeast-1",
+ "ca-central-1": "appdevzipfiles-ca-central-1",
+ "eu-central-1":"appdevzipfiles-eu-central-1",
+ "eu-west-1":"appdevzipfiles-eu-west-1",
+ "eu-west-2":"appdevzipfiles-eu-west-2",
+ "eu-west-3":"appdevzipfiles-eu-west-3",
+ "eu-north-1":"appdevzipfiles-eu-north-1s",
+ "sa-east-1":"appdevzipfiles-sa-east-1",
+ "ap-east-1":"appdevzipfiles-ap-east-1s",
+ "af-south-1":"appdevzipfiles-af-south-1s",
+ "eu-south-1":"appdevzipfiles-eu-south-1",
+ "me-south-1":"appdevzipfiles-me-south-1s",
+ "me-central-1": "appdevzipfiles-me-central-1",
+ "eu-central-2":"appdevzipfiles-eu-central-2ss",
+ "ap-northeast-3" :"appdevzipfiles-ap-northeast-3s",
+ "ap-southeast-3": "appdevzipfiles-ap-southeast-3"
+}
+
+
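+# The map above pins each region to its artifact bucket. A few bucket names carry a trailing "s"
+# (or "ss"), presumably because the plain "appdevzipfiles-<region>" name was unavailable there.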
+def get_bucket_name(region):
+ return region_map[region]
def upload_code_in_multiple_regions(filepath, bucket_prefix):
for region in regions:
- upload_code_in_S3(filepath, get_bucket_name(bucket_prefix, region), region)
+ upload_code_in_S3(filepath, get_bucket_name(region), region)
def create_buckets(bucket_prefix):