diff --git a/Integration/Data Export to ML Pipeline/Export Data for ML Training/README.md b/Integration/Data Export to ML Pipeline/Export Data for ML Training/README.md new file mode 100644 index 0000000000..b12313d7e4 --- /dev/null +++ b/Integration/Data Export to ML Pipeline/Export Data for ML Training/README.md @@ -0,0 +1,30 @@ +# Export ServiceNow Data to ML Pipeline + +## Overview +This snippet shows how to export incident data from ServiceNow and feed it into an external ML pipeline for analysis and predictions. + +## What It Does +- **Script Include**: Queries incidents from ServiceNow +- **Scripted REST API**: Exposes data as JSON endpoint +- **Python Script**: Consumes data, preprocesses it, and runs basic ML operations +- **Result Storage**: Sends predictions back to ServiceNow + +## Use Cases +- Predict incident resolution time +- Classify tickets automatically +- Detect anomalies in service data +- Smart assignment recommendations + +## Files +- `data_export_script_include.js` - Server-side Script Include to query incident data +- `export_data_rest_api.js` - Scripted REST API to expose data as JSON endpoint + +## How to Use +1. Create a Script Include in ServiceNow named `MLDataExporter` using `data_export_script_include.js` +2. Create a Scripted REST API with base path `/api/ml_export` and resource `/incidents` using `export_data_rest_api.js` +3. Call the endpoint: `GET /api/ml_export/incidents?limit=100` +4. 
// ---------------------------------------------------------------------------
// Script Include: MLDataExporter
// Purpose: Query incident data for ML pipeline consumption.
// Usage:   var exporter = new MLDataExporter();
//          var data = exporter.getIncidentData(limit, offset);
// ---------------------------------------------------------------------------
var MLDataExporter = Class.create();
MLDataExporter.prototype = {
    initialize: function() {},

    /**
     * Extract incident records suitable for ML training.
     *
     * @param {number|string} [limit=100]  - maximum number of records to return
     * @param {number|string} [offset=0]   - records to skip (new optional param;
     *                                       defaults preserve existing callers)
     * @returns {Array<Object>} one flat feature object per incident
     */
    getIncidentData: function(limit, offset) {
        // getParameter()/callers may hand us strings; normalize defensively.
        limit = parseInt(limit, 10) || 100;
        offset = parseInt(offset, 10) || 0;
        var incidents = [];

        var gr = new GlideRecord('incident');
        gr.addQuery('active', 'true');
        gr.addQuery('state', '!=', ''); // exclude blank states
        // chooseWindow gives real pagination; setLimit alone cannot skip rows.
        gr.chooseWindow(offset, offset + limit);
        gr.query();

        while (gr.next()) {
            // Fields relevant for ML analysis, one flat record per incident.
            incidents.push({
                id: gr.getValue('sys_id'),
                description: gr.getValue('description'),
                short_description: gr.getValue('short_description'),
                category: gr.getValue('category'),
                priority: gr.getValue('priority'),
                impact: gr.getValue('impact'),
                urgency: gr.getValue('urgency'),
                state: gr.getValue('state'),
                created_on: gr.getValue('sys_created_on'),
                resolution_time: this._calculateResolutionTime(gr)
            });
        }

        return incidents;
    },

    /**
     * Resolution time in hours, or null when the incident is not yet resolved.
     *
     * Bug fixes vs. the original:
     *  - sys_updated_on is the last-touch timestamp, not the resolution time;
     *    use resolved_at and return null when it is empty (active incidents
     *    usually are unresolved, so null is the common case here).
     *  - GlideDateTime.subtract() returns a GlideDuration, not a number, so
     *    dividing it directly produced NaN; use getNumericValue() (millis).
     */
    _calculateResolutionTime: function(gr) {
        var resolvedAt = gr.getValue('resolved_at');
        if (!resolvedAt) {
            return null; // still open — no resolution time to report
        }
        var created = new GlideDateTime(gr.getValue('sys_created_on'));
        var resolved = new GlideDateTime(resolvedAt);
        var durationMs = GlideDateTime.subtract(created, resolved).getNumericValue();
        return Math.abs(durationMs / (1000 * 60 * 60)); // ms -> hours
    },

    type: 'MLDataExporter'
};

// ---------------------------------------------------------------------------
// Scripted REST API Resource: ML Data Export
// Base Path: /api/ml_export   Resource: /incidents   Method: GET
// Parameters: ?limit=100&offset=0
// ---------------------------------------------------------------------------
(function process(request, response) {
    try {
        // Query parameters arrive as strings; parse with an explicit radix.
        var limit = parseInt(request.getParameter('limit'), 10) || 100;
        var offset = parseInt(request.getParameter('offset'), 10) || 0;

        // Bug fix: the original read `offset` but never applied it.
        var exporter = new MLDataExporter();
        var incidents = exporter.getIncidentData(limit, offset);

        // Envelope with metadata so the ML pipeline can sanity-check payloads.
        var result = {
            status: 'success',
            count: incidents.length,
            data: incidents,
            timestamp: new GlideDateTime().toString()
        };

        response.setContentType('application/json');
        response.setStatus(200);
        response.getStreamWriter().writeString(JSON.stringify(result));

    } catch (error) {
        // Return a structured error body to the caller and log server-side.
        response.setStatus(500);
        response.setContentType('application/json');
        var error_response = {
            status: 'error',
            message: error.toString(),
            timestamp: new GlideDateTime().toString()
        };
        response.getStreamWriter().writeString(JSON.stringify(error_response));
        gs.log('ML Export API Error: ' + error.toString(), 'MLDataExport');
    }
})(request, response);
Integration/Call ML Prediction API/README.md @@ -0,0 +1,44 @@ +# Integrate ServiceNow with External ML Model API + +## Overview +Call an external ML API from ServiceNow to get AI predictions for incidents and auto-update records. + +## What It Does +- Sends incident data to external ML API via REST call +- Receives predictions (resolution time, category, priority, etc.) +- Automatically updates incident record with predictions +- Includes error handling and logging + +## Use Cases +- Predict how long an incident will take to resolve +- Auto-suggest the right category/priority +- Recommend best assignment group +- Get risk scores for changes + +## Files +- `ml_prediction_script_include.js` - Script Include that calls ML API + +## How to Use +1. Create Script Include in ServiceNow named `MLPredictionClient` +2. Copy code from `ml_prediction_script_include.js` +3. Update `ML_API_URL` and `API_KEY` with your ML service details +4. Call it from a Business Rule or another server-side script to get predictions (`RESTMessageV2` runs server-side and cannot be used in a Client Script) +5. 
// Script Include: MLPredictionClient
// Calls an external ML API to get incident predictions.
var MLPredictionClient = Class.create();
MLPredictionClient.prototype = {
    initialize: function() {
        // SECURITY: do not hard-code credentials in a Script Include. Read
        // them from system properties, falling back to the original
        // placeholders so existing instances keep their current behavior.
        this.ML_API_URL = gs.getProperty('ml.prediction.api_url', 'https://your-ml-api.com/predict');
        this.API_KEY = gs.getProperty('ml.prediction.api_key', 'your-api-key-here');
    },

    /**
     * Ask the external ML service for predictions about one incident.
     *
     * @param {Object} incidentData - expects description, category, priority
     * @returns {Object|null} {estimated_hours, predicted_category, confidence},
     *                        or null on any transport, HTTP, or parse failure
     */
    predictIncident: function(incidentData) {
        try {
            var request = new RESTMessageV2();
            request.setEndpoint(this.ML_API_URL);
            request.setHttpMethod('POST');
            request.setRequestHeader('Authorization', 'Bearer ' + this.API_KEY);
            request.setRequestHeader('Content-Type', 'application/json');

            // Send only the fields the model consumes.
            var payload = {
                description: incidentData.description,
                category: incidentData.category,
                priority: incidentData.priority
            };
            request.setRequestBody(JSON.stringify(payload));

            var response = request.execute();

            // Bug fix: the original fed the body straight to JSON.parse
            // without checking the HTTP status, so a 4xx/5xx error page
            // would throw an opaque parse error (or worse, half-parse).
            var statusCode = Number(response.getStatusCode());
            if (statusCode < 200 || statusCode >= 300) {
                gs.log('ML API returned HTTP ' + statusCode + ': ' + response.getBody(), 'MLPredictionClient');
                return null;
            }

            var result = JSON.parse(response.getBody());
            return {
                estimated_hours: result.estimated_hours,
                predicted_category: result.category,
                confidence: result.confidence
            };
        } catch (error) {
            // Transport failures and JSON.parse errors both land here;
            // callers receive null and can fall back to manual handling.
            gs.log('ML API Error: ' + error, 'MLPredictionClient');
            return null;
        }
    },

    type: 'MLPredictionClient'
};