/**
 * This file is part of SudoBot.
 *
 * Copyright (C) 2021-2023 OSN Developers.
 *
 * SudoBot is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * SudoBot is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with SudoBot. If not, see <https://www.gnu.org/licenses/>.
 */

import { Message, PermissionFlagsBits } from "discord.js";
import { google } from "googleapis";
import Service from "../core/Service";
import { HasEventListeners } from "../types/HasEventListeners";
import { log, logError } from "../utils/logger";
import { isImmuneToAutoMod } from "../utils/utils";

export const name = "aiAutoMod";
const discoveryURL = "https://commentanalyzer.googleapis.com/$discovery/rest?version=v1alpha1";

// TODO: Add support for other types of message attributes

export default class AIAutoModService extends Service implements HasEventListeners {
    protected googleClient: any = undefined;
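
    /**
     * Calls the Comment Analyzer `comments.analyze` endpoint, wrapping the
     * callback-style client in a Promise so it can be awaited.
     */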
    analyze(client: any, params: any) {
        return new Promise<any>((resolve, reject) => {
            client.comments.analyze(params, (error: any, response: any) => {
                if (error) {
                    reject(error);
                    return;
                }

                resolve(response);
            });
        });
    }
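
    /**
     * Discovers and caches the Comment Analyzer (Perspective) API client at boot.
     */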
    async boot() {
        this.googleClient = await google.discoverAPI<any>(discoveryURL);
    }
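
    /**
     * Scores each incoming message with the Perspective API and removes it
     * when any configured attribute threshold is exceeded.
     */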
    async onMessageCreate(message: Message<boolean>) {
        if (!process.env.PERSPECTIVE_API_TOKEN || !message.content?.trim()) {
            return false;
        }

        const config = this.client.configManager.config[message.guildId!]?.ai_automod;

        if (!config?.enabled || !message.deletable) {
            return false;
        }

        if (await isImmuneToAutoMod(this.client, message.member!, PermissionFlagsBits.ManageMessages)) {
            return false;
        }

        const {
            max_severe_toxicity,
            max_threat,
            max_toxicity,
            max_explicit,
            max_flirtation,
            max_identity_attack,
            max_insult,
            max_profanity
        } = config.parameters;
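
        // Each max_* threshold is a percentage (0-100) taken from the guild's ai_automod config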
        try {
            const response = await this.analyze(this.googleClient, {
                key: process.env.PERSPECTIVE_API_TOKEN,
                resource: {
                    requestedAttributes: {
                        TOXICITY: {},
                        THREAT: {},
                        SEVERE_TOXICITY: {},
                        IDENTITY_ATTACK: {},
                        INSULT: {},
                        PROFANITY: {},
                        SEXUALLY_EXPLICIT: {},
                        FLIRTATION: {}
                    },
                    comment: {
                        text: message.content
                    },
                    languages: ["en"]
                }
            });

            log(JSON.stringify(response.data.attributeScores, null, 4));
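
            // Perspective returns each summaryScore.value in [0, 1]; scale to 0-100 to match the thresholds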
            const threatScore = response.data.attributeScores.THREAT.summaryScore.value * 100;
            const toxicityScore = response.data.attributeScores.TOXICITY.summaryScore.value * 100;
            const severeToxicityScore = response.data.attributeScores.SEVERE_TOXICITY.summaryScore.value * 100;
            const explicitScore = response.data.attributeScores.SEXUALLY_EXPLICIT.summaryScore.value * 100;
            const flirtationScore = response.data.attributeScores.FLIRTATION.summaryScore.value * 100;
            const identityAttackScore = response.data.attributeScores.IDENTITY_ATTACK.summaryScore.value * 100;
            const insultScore = response.data.attributeScores.INSULT.summaryScore.value * 100;
            const profanityScore = response.data.attributeScores.PROFANITY.summaryScore.value * 100;

            const isThreat = threatScore >= max_threat;
            const isToxic = toxicityScore >= max_toxicity;
            const isSeverelyToxic = severeToxicityScore >= max_severe_toxicity;
            const isExplicit = explicitScore >= max_explicit;
            const isFlirty = flirtationScore >= max_flirtation;
            const isAttack = identityAttackScore >= max_identity_attack;
            const isInsult = insultScore >= max_insult;
            const isProfanity = profanityScore >= max_profanity;
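
            // Delete the message and log the incident if any attribute crossed its threshold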
            if (isThreat || isToxic || isSeverelyToxic || isExplicit || isFlirty || isAttack || isInsult || isProfanity) {
                await message.delete();
                await this.client.logger.logAIAutoModMessageDelete({
                    message,
                    toxicityScore,
                    severeToxicityScore,
                    threatScore,
                    isSeverelyToxic,
                    isThreat,
                    isToxic,
                    isExplicit,
                    isFlirty,
                    isAttack,
                    isInsult,
                    isProfanity,
                    explicitScore,
                    flirtationScore,
                    identityAttackScore,
                    insultScore,
                    profanityScore
                });
            }
        } catch (e) {
            logError(e);
        }
    }
}