diff --git a/lib/couch.ex b/lib/couch.ex
new file mode 100644
index 0000000..fdd8579
--- /dev/null
+++ b/lib/couch.ex
@@ -0,0 +1,18 @@
+defmodule Couch do
+  def get(db, doc) do
+    config = Application.get_env(:lsg, :couch)
+    url = [Keyword.get(config, :url), db, doc] |> Enum.join("/")
+    user = Keyword.get(config, :user)
+    pass = Keyword.get(config, :pass)
+    client_options = Keyword.get(config, :client_options, [])
+    headers = [{"accept", "application/json"}, {"user-agent", "beautte"}]
+    options = [hackney: [:insecure, {:basic_auth, {user, pass}}]] ++ client_options
+    case HTTPoison.get(url, headers, options) do
+      {:ok, %HTTPoison.Response{status_code: 200, body: body}} ->
+        {:ok, Poison.decode!(body)}
+      {:ok, %HTTPoison.Response{status_code: 404}} ->
+        {:error, :not_found}
+      error -> {:error, {:couchdb_error, error}}
+    end
+  end
+end
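
Couch.get/2 reads its connection settings from the :lsg application environment; only the :url, :user, :pass and optional :client_options keys are used. A minimal configuration sketch, assuming the usual config/config.exs layout (the URL and credentials below are placeholder values):

    # config/config.exs (hypothetical values; key names match what Couch.get/2 reads)
    config :lsg, :couch,
      url: "http://127.0.0.1:5984",
      user: "bot",
      pass: "secret",
      client_options: []  # extra HTTPoison options appended to the request

With that in place, Couch.get("bot-plugin-openai-prompts", "some-doc") returns {:ok, decoded_map} on a 200 response, {:error, :not_found} on a 404, and {:error, {:couchdb_error, reason}} for anything else.
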
diff --git a/lib/lsg_irc/gpt_plugin.ex b/lib/lsg_irc/gpt_plugin.ex
new file mode 100644
index 0000000..f628f8d
--- /dev/null
+++ b/lib/lsg_irc/gpt_plugin.ex
@@ -0,0 +1,103 @@
+defmodule LSG.IRC.GptPlugin do
+  require Logger
+
+  def irc_doc() do
+    """
+    # OpenAI GPT
+
+    * **!gpt** list available GPT tasks
+    * **!gpt `[task]` `<task args>`** run a task
+    * **?offensive `<content>`** check whether content is offensive
+ """
+ end
+
+ @couch_db "bot-plugin-openai-prompts"
+ @trigger "gpt"
+
+ def start_link() do
+ GenServer.start_link(__MODULE__, [], name: __MODULE__)
+ end
+
+ def init(_) do
+ regopts = [plugin: __MODULE__]
+ {:ok, _} = Registry.register(IRC.PubSub, "trigger:#{@trigger}", regopts)
+ {:ok, _} = Registry.register(IRC.PubSub, "trigger:offensive", regopts)
+ {:ok, nil}
+ end
+
+ def handle_info({:irc, :trigger, @trigger, m = %IRC.Message{trigger: %IRC.Trigger{type: :bang, args: [task | args]}}}, state) do
+ case Couch.get(@couch_db, task) do
+ {:ok, task} -> task(m, task, Enum.join(args, " "))
+ {:error, :not_found} -> m.replyfun.("gpt: no such task: #{task}")
+ error ->
+ Logger.info("gpt: task load error: #{inspect error}")
+ m.replyfun.("gpt: database error")
+ end
+ {:noreply, state}
+ end
+
+ def handle_info({:irc, :trigger, @trigger, m = %IRC.Message{trigger: %IRC.Trigger{type: :bang, args: []}}}, state) do
+ case Couch.get(@couch_db, "_all_docs") do
+ {:ok, %{"rows" => []}} -> m.replyfun.("gpt: no tasks available")
+ {:ok, %{"rows" => tasks}} ->
+ tasks = tasks |> Enum.map(fn(task) -> Map.get(task, "id") end) |> Enum.join(", ")
+ m.replyfun.("gpt: tasks: #{tasks}")
+ error ->
+ Logger.info("gpt: task load error: #{inspect error}")
+ m.replyfun.("gpt: database error")
+ end
+ {:noreply, state}
+ end
+
+ def handle_info({:irc, :trigger, "offensive", m = %IRC.Message{trigger: %IRC.Trigger{type: :query, args: text}}}, state) do
+ text = Enum.join(text, " ")
+ {moderate?, moderation} = moderation(text, m.account.id)
+ reply = cond do
+ moderate? -> "⚠️ #{Enum.join(moderation, ", ")}"
+ !moderate? && moderation -> "👍"
+ !moderate? -> "☠️ error"
+ end
+ m.replyfun.(reply)
+ {:noreply, state}
+ end
+
+ def handle_info(_, state) do
+ {:noreply, state}
+ end
+
+ defp task(msg, task = %{"type" => "completions", "prompt" => prompt}, content) do
+ prompt = Tmpl.render(prompt, msg, %{"content" => content})
+ args = Map.get(task, "openai_params")
+ |> Map.put("prompt", prompt)
+ |> Map.put("user", msg.account.id)
+ {moderate?, moderation} = moderation(content, msg.account.id)
+ if moderate?, do: msg.replyfun.("⚠️ offensive input: #{Enum.join(moderation, ", ")}")
+ Logger.debug("GPT: request #{inspect args}")
+ case OpenAi.post("/v1/completions", args) do
+ {:ok, %{"choices" => [%{"text" => text} | _]}} ->
+ {moderate?, moderation} = moderation(text, msg.account.id)
+ if moderate?, do: msg.replyfun.("🚨 offensive output: #{Enum.join(moderation, ", ")}")
+ msg.replyfun.(String.trim(text))
+ error ->
+ Logger.error("gpt error: #{inspect error}")
+ msg.replyfun.("gpt: ☠️ ")
+ end
+ end
+
+ defp moderation(content, user_id) do
+ case OpenAi.post("/v1/moderations", %{"input" => content, "user" => user_id}) do
+ {:ok, %{"results" => [%{"flagged" => true, "categories" => categories} | _]}} ->
+ cat = categories
+ |> Enum.filter(fn({_key, value}) -> value end)
+ |> Enum.map(fn({key, _}) -> key end)
+ {true, cat}
+ {:ok, moderation} ->
+ Logger.debug("gpt: moderation: not flagged, #{inspect moderation}")
+ {false, true}
+ error ->
+ Logger.error("gpt: moderation error: #{inspect error}")
+ {false, false}
+ end
+ end
+
+end
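
The task documents GptPlugin loads from the bot-plugin-openai-prompts database are only implied by the pattern matches in task/3. A sketch of the expected shape, written as an Elixir map (the concrete values and the {{content}} placeholder syntax are assumptions; Tmpl.render/3 is defined elsewhere in the repository and determines the real template format):

    %{
      "_id" => "summarize",                  # document id, listed by !gpt and invoked as !gpt summarize
      "type" => "completions",               # the only type task/3 currently matches
      "prompt" => "Summarize: {{content}}",  # rendered by Tmpl.render/3 with the trigger arguments
      "openai_params" => %{                  # merged with "prompt" and "user", then posted as-is
        "model" => "text-davinci-003",
        "max_tokens" => 256
      }
    }

Also worth noting is the return convention of moderation/2: {true, categories} when the input is flagged, {false, true} when the moderation call succeeded without flags, and {false, false} when the call itself failed. The ?offensive handler relies on this to choose between the warning, the 👍 and the error reply.
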
diff --git a/lib/open_ai.ex b/lib/open_ai.ex
new file mode 100644
index 0000000..9feb9a4
--- /dev/null
+++ b/lib/open_ai.ex
@@ -0,0 +1,17 @@
+defmodule OpenAi do
+
+  def post(path, data, options \\ []) do
+    config = Application.get_env(:lsg, :openai, [])
+    url = "https://api.openai.com#{path}"
+    headers = [{"user-agent", "internal private experiment bot, href@random.sh"},
+               {"content-type", "application/json"},
+               {"authorization", "Bearer " <> Keyword.get(config, :key, "unset-api-key")}]
+    options = options ++ [timeout: :timer.seconds(180), recv_timeout: :timer.seconds(180)]
+    with {:ok, json} <- Poison.encode(data),
+         {:ok, %HTTPoison.Response{status_code: 200, body: body}} <- HTTPoison.post(url, json, headers, options),
+         {:ok, data} <- Poison.decode(body) do
+      {:ok, data}
+    end
+  end
+
+end
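
OpenAi.post/3 only needs an API key from the :lsg application environment; JSON encoding, bearer auth and the 180-second timeouts are handled inside the function. A usage sketch under that assumption (the key is a placeholder):

    # config/config.exs (hypothetical value)
    config :lsg, :openai, key: "sk-..."

    # a moderation request as issued by GptPlugin.moderation/2
    {:ok, %{"results" => [result | _]}} =
      OpenAi.post("/v1/moderations", %{"input" => "some text", "user" => "nick"})

Any response other than a 200 falls out of the with block unchanged, so callers receive the raw Poison or HTTPoison term and have to treat every non-{:ok, map} value as an error, which is what both call sites in GptPlugin do.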