# SafetyGates Node.js SDK

Official Node.js client for the SafetyGates content moderation API.
```bash
npm install safetygates
```

```javascript
const SafetyGates = require('safetygates');
const client = new SafetyGates('sg_live_your_api_key');
// Classify text
const result = await client.classify('you suck at this game', ['toxic', 'harassment']);
console.log(result.results.toxic.label); // true
console.log(result.results.toxic.confidence); // 0.92
// Convenience methods
if (await client.isToxic('some message')) {
console.log('Message blocked');
}
```

Sign up for free at [sg-api.cyclecore.ai/signup](https://sg-api.cyclecore.ai/signup)
- Free tier: 1,000 requests/day
- No credit card required
```javascript
const client = new SafetyGates(apiKey, options);
```
| Parameter | Type | Description |
|-----------|------|-------------|
| apiKey | string | Your API key (required) |
| options.baseUrl | string | API URL (default: https://sg-api.cyclecore.ai) |
| options.timeout | number | Request timeout in ms (default: 10000) |
Classify a single text.
```javascript
const result = await client.classify('hello world', ['toxic', 'spam']);
// Result:
{
results: {
toxic: { label: false, confidence: 0.12 },
spam: { label: false, confidence: 0.08 }
},
latency_us: 1234.5
}
```
Classify multiple texts (up to 10,000).
```javascript
const result = await client.classifyBatch(
['hello', 'you suck', 'nice game'],
['toxic']
);
// Result:
{
results: [
{ toxic: { label: false, confidence: 0.1 } },
{ toxic: { label: true, confidence: 0.94 } },
{ toxic: { label: false, confidence: 0.05 } }
],
total_latency_us: 3456.7,
items_per_second: 867.5
}
```
Convenience method to check toxicity.
```javascript
if (await client.isToxic(message)) {
// Block message
}
// With custom threshold
if (await client.isToxic(message, 0.8)) {
// Only block if >80% confident
}
```
Convenience method to check spam.
```javascript
if (await client.isSpam(message)) {
  // Block spam
}
```
Get available classification gates.
```javascript
const { gates } = await client.listGates();
// [{ id: 'toxic', category: 'moderation', ... }, ...]
```
| Gate | Language | Description |
|------|----------|-------------|
| toxic | English | Toxic/abusive content |
| spam | English | Spam/promotional |
| hate | English | Hate speech |
| nsfw | English | Adult content |
| harassment | English | Harassment/bullying |
| toxic_es | Spanish | Toxic content |
| spam_es | Spanish | Spam |
| hate_es | Spanish | Hate speech |
| toxic_pt | Portuguese | Toxic content |
| toxic_fr | French | Toxic content |
Use the GATES constant for autocomplete:
```javascript
const { GATES } = require('safetygates');
client.classify(text, [GATES.TOXIC, GATES.SPAM]);
```

```javascript
const { SafetyGatesError } = require('safetygates');
try {
await client.classify('test', ['invalid_gate']);
} catch (err) {
if (err instanceof SafetyGatesError) {
console.log(err.statusCode); // 400
console.log(err.detail); // { detail: "Unknown gates: ['invalid_gate']" }
}
}
```

```javascript
const SafetyGates = require('safetygates');
const { Client, GatewayIntentBits } = require('discord.js');
const sg = new SafetyGates(process.env.SAFETYGATES_KEY);
const discord = new Client({ intents: [GatewayIntentBits.Guilds, GatewayIntentBits.GuildMessages] });
discord.on('messageCreate', async (message) => {
if (message.author.bot) return;
if (await sg.isToxic(message.content, 0.7)) {
await message.delete();
await message.channel.send(`${message.author}, please keep it civil.`);
}
});
discord.login(process.env.DISCORD_TOKEN);
```
Full TypeScript support included:
```typescript
import SafetyGates, { ClassifyResult, GATES } from 'safetygates';
const client = new SafetyGates('sg_live_xxx');
const result: ClassifyResult = await client.classify('test', [GATES.TOXIC]);
```
- Documentation
- Get API Key
- CycleCore
MIT