
Commit 94b9408

Add LICENSE, and s3 function

1 parent d115a7e commit 94b9408

File tree

3 files changed: +153 -0 lines changed

LICENSE.txt

Lines changed: 13 additions & 0 deletions
@@ -0,0 +1,13 @@
Copyright 2015, Sumo Logic Inc. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

s3/README.md

Lines changed: 56 additions & 0 deletions
@@ -0,0 +1,56 @@
# S3 to SumoLogic

This function reads files from an S3 bucket and posts their contents to a Sumo Logic hosted HTTP collector. Files in the source bucket can be gzipped or in cleartext, but should contain only text.
## How it works

The function receives S3 notifications when new files are uploaded to the source S3 bucket. It then reads each file, unzipping it first if its name ends with `.gz`, and sends the contents to the target Sumo endpoint. The notification event it consumes is sketched below.
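For reference, the S3 notification event looks roughly like this, abridged to the fields the code actually reads; the bucket name and object key are placeholders:

<pre>
{
  "Records": [
    {
      "s3": {
        "bucket": { "name": "my-source-bucket" },
        "object": { "key": "logs/2015-06-01/app.log.gz" }
      }
    }
  ]
}
</pre>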
## Lambda Setup

For the Sumo collector configuration, do not enable multiline processing or
one message per request -- the idea is to send as many messages in one request
as possible to Sumo and let Sumo break them apart as needed.

In the AWS console, use a code entry type of 'Edit code inline' and paste in the
code (double-check the hostname and path as per your collector setup).

In the configuration, specify `index.handler` as the Handler. Specify a Role that has
sufficient privileges to read from the *source* bucket and invoke a Lambda
function. You can use the AWSLambdaBasicExecutionRole and AmazonS3ReadOnlyAccess managed policies, although it is *strongly* recommended to customize them to restrict access to the relevant resources in production (see the scoped-down sketch after the two policies below):
<pre>
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "logs:CreateLogGroup",
        "logs:CreateLogStream",
        "logs:PutLogEvents"
      ],
      "Resource": "arn:aws:logs:*:*:*"
    }
  ]
}
</pre>
AND
<pre>
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "s3:Get*",
        "s3:List*"
      ],
      "Resource": "*"
    }
  ]
}
</pre>
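As a sketch of the recommended tightening, the S3 policy above can narrow the actions to `s3:GetObject` and `s3:ListBucket` and limit `Resource` to the source bucket instead of `"*"`; the bucket name here is a placeholder:

<pre>
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "s3:GetObject",
        "s3:ListBucket"
      ],
      "Resource": [
        "arn:aws:s3:::my-source-bucket",
        "arn:aws:s3:::my-source-bucket/*"
      ]
    }
  ]
}
</pre>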
Once the function is created, you can tie it to the source S3 bucket. From the S3 Management Console, select the bucket, go to its Properties, select Events, and add a Notification. From there, provide a name for the notification, select *ObjectCreated (All)* as the Events, and select *Lambda* as the *Send To* option. Finally, select the Lambda function created above and Save.
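If you prefer scripting the setup over the console, the same notification can be expressed as a JSON document and applied with `aws s3api put-bucket-notification-configuration`; the function ARN below is a placeholder. Note that the bucket also needs permission to invoke the function, which can be granted with `aws lambda add-permission`.

<pre>
{
  "LambdaFunctionConfigurations": [
    {
      "LambdaFunctionArn": "arn:aws:lambda:us-east-1:123456789012:function:s3ToSumo",
      "Events": ["s3:ObjectCreated:*"]
    }
  ]
}
</pre>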

s3/s3.js

Lines changed: 84 additions & 0 deletions
@@ -0,0 +1,84 @@
var AWS = require('aws-sdk');
var s3 = new AWS.S3();
var https = require('https');
var zlib = require('zlib');

///////////////////////////////////////////////////////////////////////////////////////////////////////////
// Remember to change the hostname and path to match your collection API and specific HTTP-source endpoint
// See more at: https://service.sumologic.com/help/Default.htm#Collector_Management_API.htm
///////////////////////////////////////////////////////////////////////////////////////////////////////////

var options = { 'hostname': 'endpoint1.collection.sumologic.com',
                'path': '/receiver/v1/http/<XXXX>',
                'method': 'POST'
};


function s3LogsToSumo(bucket, objKey, context) {
    var req = https.request(options, function(res) {
        var body = '';
        console.log('Status:', res.statusCode);
        res.setEncoding('utf8');
        res.on('data', function(chunk) { body += chunk; });
        res.on('end', function() {
            console.log('Successfully processed HTTPS response');
            // Only signal success once Sumo has acknowledged the request
            context.succeed();
        });
    });
    req.on('error', function(e) {
        context.fail(e);
    });

    var totalBytes = 0;
    var isCompressed = /\.gz$/.test(objKey);

    var finishFnc = function() {
        console.log('End of stream');
        console.log('Final total bytes read: ' + totalBytes);
        // Ending the request lets the response handler above call context.succeed()
        req.end();
    };

    var s3Stream = s3.getObject({Bucket: bucket, Key: objKey}).createReadStream();
    s3Stream.on('error', function() {
        console.log(
            'Error getting object "' + objKey + '" from bucket "' + bucket + '". ' +
            'Make sure they exist and your bucket is in the same region as this function.');
        context.fail();
    });

    req.write('Bucket: ' + bucket + ' ObjectKey: ' + objKey + '\n');

    if (!isCompressed) {
        // Cleartext object: forward chunks straight to the Sumo request
        s3Stream.on('data', function(data) {
            req.write(data + '\n');
            totalBytes += data.length;
        });
        s3Stream.on('end', finishFnc);
    } else {
        // Gzipped object: decompress on the fly before forwarding
        var gunzip = zlib.createGunzip();
        s3Stream.pipe(gunzip);

        gunzip.on('data', function(data) {
            totalBytes += data.length;
            req.write(data.toString() + '\n');
        }).on('end', finishFnc)
          .on('error', function(error) {
              context.fail(error);
          });
    }
}

exports.handler = function(event, context) {
    options.agent = new https.Agent(options);
    event.Records.forEach(function(record) {
        var bucket = record.s3.bucket.name;
        // Keys arrive URL-encoded in S3 notifications, with '+' standing in for spaces
        var objKey = decodeURIComponent(record.s3.object.key.replace(/\+/g, ' '));
        console.log('Bucket: ' + bucket + ' ObjectKey: ' + objKey);
        s3LogsToSumo(bucket, objKey, context);
    });
};
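To exercise the handler outside of Lambda, a minimal sketch like the following can help, assuming the code above is saved as `index.js` and AWS credentials are available locally; the file name, bucket, and key are placeholders:

<pre>
// test.js -- feed the handler a hand-built S3 event with a stub context
var handler = require('./index').handler;

// Placeholders: point these at a bucket/object your credentials can read
var fakeEvent = {
    Records: [{
        s3: {
            bucket: { name: 'my-source-bucket' },
            object: { key: 'logs/example.log.gz' }
        }
    }]
};

var fakeContext = {
    succeed: function() { console.log('done'); },
    fail: function(err) { console.error('failed:', err); }
};

handler(fakeEvent, fakeContext);
</pre>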
