/**
 * This file is part of SudoBot.
 *
 * Copyright (C) 2021-2023 OSN Developers.
 *
 * SudoBot is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * SudoBot is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with SudoBot. If not, see <https://www.gnu.org/licenses/>.
 */

import { Message, PermissionFlagsBits } from "discord.js";
import Service from "../core/Service";
import { HasEventListeners } from "../types/HasEventListeners";
import { log, logError } from "../utils/Logger";
import { isImmuneToAutoMod } from "../utils/utils";

export const name = "aiAutoMod";
const discoveryURL = "https://commentanalyzer.googleapis.com/$discovery/rest?version=v1alpha1";

// TODO: Add support for other types of message attributes

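// Minimal shape of the client returned by googleapis' discoverAPI() for the
// Perspective (Comment Analyzer) API; only the members this service uses.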
type GoogleClient = {
    comments: {
        analyze: (
            params: unknown,
            callback: (error: Error | null, response: unknown) => void
        ) => void;
    };
};

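// Subset of the Perspective API analyze() response that this service reads.
// Each attribute's summaryScore.value is a probability between 0 and 1.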
type AttributeScore = {
    summaryScore: {
        value: number;
    };
};

type GoogleResponse = {
    data: {
        attributeScores: {
            TOXICITY: AttributeScore;
            THREAT: AttributeScore;
            SEVERE_TOXICITY: AttributeScore;
            IDENTITY_ATTACK: AttributeScore;
            INSULT: AttributeScore;
            PROFANITY: AttributeScore;
            SEXUALLY_EXPLICIT: AttributeScore;
            FLIRTATION: AttributeScore;
        };
    };
};

export default class AIAutoModService extends Service implements HasEventListeners {
    protected googleClient: GoogleClient | undefined = undefined;

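    /**
     * Wraps the callback-based comments.analyze() call of the discovered
     * client in a Promise so that it can be awaited.
     */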
    analyze(client: GoogleClient, params: unknown) {
        return new Promise<GoogleResponse>((resolve, reject) => {
            client.comments.analyze(params, (error: Error | null, response: unknown) => {
                if (error) {
                    reject(error);
                    return;
                }

                resolve(response as GoogleResponse);
            });
        });
    }

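    /**
     * Discovers the Perspective (Comment Analyzer) API client at startup.
     * Skipped entirely when no PERSPECTIVE_API_TOKEN is configured.
     */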
    async boot() {
        // Without an API token there is nothing to set up, and onMessageCreate()
        // bails out early anyway.
        if (!process.env.PERSPECTIVE_API_TOKEN) {
            return;
        }

        const { google } = await import("googleapis");
        // The discovered endpoint is loosely typed; narrow it to the members we use.
        this.googleClient = (await google.discoverAPI(discoveryURL)) as unknown as GoogleClient;
    }

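    /**
     * Scores every incoming message with the Perspective API and deletes it
     * (and logs the deletion) when any score crosses the guild's configured
     * ai_automod thresholds. Members immune to automod are skipped.
     */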
    async onMessageCreate(message: Message<boolean>) {
        if (!process.env.PERSPECTIVE_API_TOKEN || !this.googleClient || !message.content?.trim()) {
            return false;
        }

        const config = this.client.configManager.config[message.guildId!]?.ai_automod;

        if (!config?.enabled || !message.deletable) {
            return false;
        }

        if (
            await isImmuneToAutoMod(
                this.client,
                message.member!,
                PermissionFlagsBits.ManageMessages
            )
        ) {
            return;
        }

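        // Per-guild thresholds, expressed on a 0-100 scale to match the scaled scores below.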
        const {
            max_severe_toxicity,
            max_threat,
            max_toxicity,
            max_explicit,
            max_flirtation,
            max_identity_attack,
            max_insult,
            max_profanity
        } = config.parameters;

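        // Request scores for every supported attribute in a single analyze() call.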
        try {
            const response = await this.analyze(this.googleClient!, {
                key: process.env.PERSPECTIVE_API_TOKEN,
                resource: {
                    requestedAttributes: {
                        TOXICITY: {},
                        THREAT: {},
                        SEVERE_TOXICITY: {},
                        IDENTITY_ATTACK: {},
                        INSULT: {},
                        PROFANITY: {},
                        SEXUALLY_EXPLICIT: {},
                        FLIRTATION: {}
                    },
                    comment: {
                        text: message.content
                    },
                    languages: ["en"]
                }
            });

            log(JSON.stringify(response.data.attributeScores, null, 4));

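            // Perspective reports each score as a probability in [0, 1]; scale to 0-100
            // so the values can be compared against the configured thresholds.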
            const threatScore = response.data.attributeScores.THREAT.summaryScore.value * 100;
            const toxicityScore = response.data.attributeScores.TOXICITY.summaryScore.value * 100;
            const severeToxicityScore =
                response.data.attributeScores.SEVERE_TOXICITY.summaryScore.value * 100;
            const explicitScore =
                response.data.attributeScores.SEXUALLY_EXPLICIT.summaryScore.value * 100;
            const flirtationScore =
                response.data.attributeScores.FLIRTATION.summaryScore.value * 100;
            const identityAttackScore =
                response.data.attributeScores.IDENTITY_ATTACK.summaryScore.value * 100;
            const insultScore = response.data.attributeScores.INSULT.summaryScore.value * 100;
            const profanityScore = response.data.attributeScores.PROFANITY.summaryScore.value * 100;

            const isThreat = threatScore >= max_threat;
            const isToxic = toxicityScore >= max_toxicity;
            const isSeverelyToxic = severeToxicityScore >= max_severe_toxicity;
            const isExplicit = explicitScore >= max_explicit;
            const isFlirty = flirtationScore >= max_flirtation;
            const isAttack = identityAttackScore >= max_identity_attack;
            const isInsult = insultScore >= max_insult;
            const isProfanity = profanityScore >= max_profanity;

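            // Delete the message and log the full set of scores if any attribute crossed its threshold.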
            if (
                isThreat ||
                isToxic ||
                isSeverelyToxic ||
                isExplicit ||
                isFlirty ||
                isAttack ||
                isInsult ||
                isProfanity
            ) {
                await message.delete();
                await this.client.loggerService.logAIAutoModMessageDelete({
                    message,
                    toxicityScore,
                    severeToxicityScore,
                    threatScore,
                    isSeverelyToxic,
                    isThreat,
                    isToxic,
                    isExplicit,
                    isFlirty,
                    isAttack,
                    isInsult,
                    isProfanity,
                    explicitScore,
                    flirtationScore,
                    identityAttackScore,
                    insultScore,
                    profanityScore
                });
            }
        } catch (e) {
            logError(e);
        }
    }
}