diff --git a/lib/plugins/gpt.ex b/lib/plugins/gpt.ex
deleted file mode 100644
index 8ee8c10..0000000
--- a/lib/plugins/gpt.ex
+++ /dev/null
@@ -1,355 +0,0 @@
-defmodule Nola.Plugins.Gpt do
-  require Logger
-  import Nola.Plugins.TempRefHelper
-
-  def irc_doc() do
-    """
-    # OpenAI GPT
-
-    Uses OpenAI's GPT-3 API to bring natural language prompts to your IRC channel.
-
-    _Prompts_ are pre-defined prompts and parameters stored in the bot's CouchDB.
-
-    _Runs_ (results of the inference of a _prompt_) are also stored in CouchDB and
-    may be resumed.
-
-    * **!gpt** list GPT prompts
-    * **!gpt `[prompt]` `<content>`** run a prompt
-    * **+gpt `[short ref|run id]` `<content>`** continue a prompt
-    * **?gpt offensive `<text>`** is content offensive?
-    * **?gpt show `[short ref|run id]`** run information and web link
-    * **?gpt `[prompt]`** prompt information and web link
-    """
-  end
-
-  @couch_db "bot-plugin-openai-prompts"
-  @couch_run_db "bot-plugin-gpt-history"
-  @trigger "gpt"
-
-  def start_link() do
-    GenServer.start_link(__MODULE__, [], name: __MODULE__)
-  end
-
-  defstruct [:temprefs]
-
-  def get_result(id) do
-    Couch.get(@couch_run_db, id)
-  end
-
-  def get_prompt(id) do
-    Couch.get(@couch_db, id)
-  end
-
-  def init(_) do
-    regopts = [plugin: __MODULE__]
-    {:ok, _} = Registry.register(Nola.PubSub, "trigger:#{@trigger}", regopts)
-    {:ok, %__MODULE__{temprefs: new_temp_refs()}}
-  end
-
-  def handle_info({:irc, :trigger, @trigger, m = %Nola.Message{trigger: %Nola.Trigger{type: :bang, args: [prompt | args]}}}, state) do
-    case Couch.get(@couch_db, prompt) do
-      {:ok, prompt} -> {:noreply, prompt(m, prompt, Enum.join(args, " "), state)}
-      {:error, :not_found} ->
-        m.replyfun.("gpt: prompt '#{prompt}' does not exist")
-        {:noreply, state}
-      error ->
-        Logger.info("gpt: prompt load error: #{inspect error}")
-        m.replyfun.("gpt: database error")
-        {:noreply, state}
-    end
-  end
-
-  def handle_info({:irc, :trigger, @trigger, m = %Nola.Message{trigger: %Nola.Trigger{type: :bang, args: []}}}, state) do
-    case Couch.get(@couch_db, "_all_docs") do
-      {:ok, %{"rows" => []}} -> m.replyfun.("gpt: no prompts available")
-      {:ok, %{"rows" => prompts}} ->
-        prompts = prompts |> Enum.map(fn(prompt) -> Map.get(prompt, "id") end) |> Enum.join(", ")
-        m.replyfun.("gpt: prompts: #{prompts}")
-      error ->
-        Logger.info("gpt: prompt load error: #{inspect error}")
-        m.replyfun.("gpt: database error")
-    end
-    {:noreply, state}
-  end
-
-  def handle_info({:irc, :trigger, @trigger, m = %Nola.Message{trigger: %Nola.Trigger{type: :plus, args: [ref_or_id | args]}}}, state) do
-    id = lookup_temp_ref(ref_or_id, state.temprefs, ref_or_id)
-    case Couch.get(@couch_run_db, id) do
-      {:ok, run} ->
-        Logger.debug("+gpt run: #{inspect run}")
-        {:noreply, continue_prompt(m, run, Enum.join(args, " "), state)}
-      {:error, :not_found} ->
-        m.replyfun.("gpt: ref or id not found or expired: #{inspect ref_or_id} (if using a short ref, try the full id)")
-        {:noreply, state}
-      error ->
-        Logger.info("+gpt: run load error: #{inspect error}")
-        m.replyfun.("gpt: database error")
-        {:noreply, state}
-    end
-  end
-
-  def handle_info({:irc, :trigger, @trigger, m = %Nola.Message{trigger: %Nola.Trigger{type: :query, args: ["offensive" | text]}}}, state) do
-    text = Enum.join(text, " ")
-    {moderate?, moderation} = moderation(text, m.account.id)
-    reply = cond do
-      moderate? -> "⚠️ #{Enum.join(moderation, ", ")}"
-      !moderate? && moderation -> "👍"
-      !moderate? -> "☠️ error"
-    end
-    m.replyfun.(reply)
-    {:noreply, state}
-  end
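The cond above leans on a tri-state contract from `moderation/2` (defined at the bottom of this file): flagged with categories, checked and clean, or the moderation call itself failed. A minimal sketch of the same dispatch with the three shapes spelled out (category names invented):

    case moderation(text, account_id) do
      {true, categories} -> "⚠️ " <> Enum.join(categories, ", ")  # flagged, e.g. ["hate"]
      {false, true} -> "👍"                                       # checked and clean
      {false, false} -> "☠️ error"                                # the moderation API call failed
    end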
-> "☠️ error" - end - m.replyfun.(reply) - {:noreply, state} - end - - def handle_info({:irc, :trigger, @trigger, m = %Nola.Message{trigger: %Nola.Trigger{type: :query, args: ["show", ref_or_id]}}}, state) do - id = lookup_temp_ref(ref_or_id, state.temprefs, ref_or_id) - url = if m.channel do - NolaWeb.Router.Helpers.gpt_url(NolaWeb.Endpoint, :result, m.network, NolaWeb.format_chan(m.channel), id) - else - NolaWeb.Router.Helpers.gpt_url(NolaWeb.Endpoint, :result, id) - end - m.replyfun.("→ #{url}") - {:noreply, state} - end - - def handle_info({:irc, :trigger, @trigger, m = %Nola.Message{trigger: %Nola.Trigger{type: :query, args: [prompt]}}}, state) do - url = if m.channel do - NolaWeb.Router.Helpers.gpt_url(NolaWeb.Endpoint, :prompt, m.network, NolaWeb.format_chan(m.channel), prompt) - else - NolaWeb.Router.Helpers.gpt_url(NolaWeb.Endpoint, :prompt, prompt) - end - m.replyfun.("→ #{url}") - {:noreply, state} - end - - def handle_info(info, state) do - Logger.debug("gpt: unhandled info: #{inspect info}") - {:noreply, state} - end - - defp continue_prompt(msg, run, content, state) do - prompt_id = Map.get(run, "prompt_id") - prompt_rev = Map.get(run, "prompt_rev") - - original_prompt = case Couch.get(@couch_db, prompt_id, rev: prompt_rev) do - {:ok, prompt} -> prompt - _ -> nil - end - - if original_prompt do - continue_prompt = %{"_id" => prompt_id, - "_rev" => prompt_rev, - "type" => Map.get(original_prompt, "type"), - "parent_run_id" => Map.get(run, "_id"), - "openai_params" => Map.get(run, "request") |> Map.delete("prompt")} - - continue_prompt = case original_prompt do - %{"continue_prompt" => prompt_string} when is_binary(prompt_string) -> - full_text = get_in(run, ~w(request prompt)) <> "\n" <> Map.get(run, "response") - continue_prompt - |> Map.put("prompt", prompt_string) - |> Map.put("prompt_format", "liquid") - |> Map.put("prompt_liquid_variables", %{"previous" => full_text}) - %{"messages" => _} -> - continue_prompt - |> Map.put("prompt", "{{content}}") - |> Map.put("prompt_format", "liquid") - |> Map.put("messages", Map.get(run, "messages")) - _ -> - prompt_content_tag = if content != "", do: " {{content}}", else: "" - string = get_in(run, ~w(request prompt)) <> "\n" <> Map.get(run, "response") <> prompt_content_tag - continue_prompt - |> Map.put("prompt", string) - |> Map.put("prompt_format", "liquid") - end - - prompt(msg, continue_prompt, content, state) - else - msg.replyfun.("gpt: cannot continue this prompt: original prompt not found #{prompt_id}@v#{prompt_rev}") - state - end - end - - # Chat prompt - # "prompt" is the template for the initial user message - # "messages" is original messages to be put before the initial user one - defp prompt(msg, prompt = %{"type" => "chat", "prompt" => prompt_template, "messages" => messages}, content, state) do - Logger.debug("gpt_plugin:prompt/4 (chat) #{inspect prompt}") - prompt_text = case Map.get(prompt, "prompt_format", "liquid") do - "liquid" -> Tmpl.render(prompt_template, msg, Map.merge(Map.get(prompt, "prompt_liquid_variables", %{}), %{"content" => content})) - "norender" -> prompt_template - end - - messages = Enum.map(messages, fn(%{"role" => role, "content" => text}) -> - text = case Map.get(prompt, "prompt_format", "liquid") do - "liquid" -> Tmpl.render(text, msg, Map.get(prompt, "prompt_liquid_variables", %{})) - "norender" -> text - end - %{"role" => role, "content" => text} - end) ++ [%{"role" => "user", "content" => prompt_text}] - - args = Map.get(prompt, "openai_params") - |> Map.put_new("model", "gpt-3.5-turbo") - 
|> Map.put("messages", messages) - |> Map.put("user", msg.account.id) - - {moderate?, moderation} = moderation(content, msg.account.id) - if moderate?, do: msg.replyfun.("⚠️ offensive input: #{Enum.join(moderation, ", ")}") - - Logger.debug("GPT: request #{inspect args}") - case OpenAi.post("/v1/chat/completions", args) do - {:ok, %{"choices" => [%{"message" => %{"content" => text}, "finish_reason" => finish_reason} | _], "usage" => usage, "id" => gpt_id, "created" => created}} -> - text = String.trim(text) - {o_moderate?, o_moderation} = moderation(text, msg.account.id) - if o_moderate?, do: msg.replyfun.("🚨 offensive output: #{Enum.join(o_moderation, ", ")}") - msg.replyfun.(text) - doc = %{"id" => FlakeId.get(), - "prompt_id" => Map.get(prompt, "_id"), - "prompt_rev" => Map.get(prompt, "_rev"), - "network" => msg.network, - "channel" => msg.channel, - "nick" => msg.sender.nick, - "account_id" => (if msg.account, do: msg.account.id), - "request" => args, - "messages" => messages ++ [%{"role" => "assistant", "content" => text}], - "message_at" => msg.at, - "reply_at" => DateTime.utc_now(), - "gpt_id" => gpt_id, - "gpt_at" => created, - "gpt_usage" => usage, - "type" => "chat", - "parent_run_id" => Map.get(prompt, "parent_run_id"), - "moderation" => %{"input" => %{flagged: moderate?, categories: moderation}, - "output" => %{flagged: o_moderate?, categories: o_moderation} - } - } - Logger.debug("Saving result to couch: #{inspect doc}") - {id, ref, temprefs} = case Couch.post(@couch_run_db, doc) do - {:ok, id, _rev} -> - {ref, temprefs} = put_temp_ref(id, state.temprefs) - {id, ref, temprefs} - error -> - Logger.error("Failed to save to Couch: #{inspect error}") - {nil, nil, state.temprefs} - end - stop = cond do - finish_reason == "stop" -> "" - finish_reason == "length" -> " — truncated" - true -> " — #{finish_reason}" - end - ref_and_prefix = if Map.get(usage, "completion_tokens", 0) == 0 do - "GPT had nothing else to say :( ↪ #{ref || "✗"}" - else - " ↪ #{ref || "✗"}" - end - msg.replyfun.(ref_and_prefix <> - stop <> - " — #{Map.get(usage, "total_tokens", 0)}" <> - " (#{Map.get(usage, "prompt_tokens", 0)}/#{Map.get(usage, "completion_tokens", 0)}) tokens" <> - " — #{id || "save failed"}") - %__MODULE__{state | temprefs: temprefs} - {:error, atom} when is_atom(atom) -> - Logger.error("gpt error: #{inspect atom}") - msg.replyfun.("gpt: ☠️ #{to_string(atom)}") - state - error -> - Logger.error("gpt error: #{inspect error}") - msg.replyfun.("gpt: ☠️ ") - state - end - end - - - defp prompt(msg, prompt = %{"type" => "completions", "prompt" => prompt_template}, content, state) do - Logger.debug("gpt:prompt/4 #{inspect prompt}") - prompt_text = case Map.get(prompt, "prompt_format", "liquid") do - "liquid" -> Tmpl.render(prompt_template, msg, Map.merge(Map.get(prompt, "prompt_liquid_variables", %{}), %{"content" => content})) - "norender" -> prompt_template - end - - args = Map.get(prompt, "openai_params") - |> Map.put("prompt", prompt_text) - |> Map.put("user", msg.account.id) - - {moderate?, moderation} = moderation(content, msg.account.id) - if moderate?, do: msg.replyfun.("⚠️ offensive input: #{Enum.join(moderation, ", ")}") - - Logger.debug("GPT: request #{inspect args}") - case OpenAi.post("/v1/completions", args) do - {:ok, %{"choices" => [%{"text" => text, "finish_reason" => finish_reason} | _], "usage" => usage, "id" => gpt_id, "created" => created}} -> - text = String.trim(text) - {o_moderate?, o_moderation} = moderation(text, msg.account.id) - if o_moderate?, do: msg.replyfun.("🚨 
diff --git a/lib/plugins/image.ex b/lib/plugins/image.ex
deleted file mode 100644
index 446cb49..0000000
--- a/lib/plugins/image.ex
+++ /dev/null
@@ -1,246 +0,0 @@
-defmodule Nola.Plugins.Image do
-  require Logger
-  import Nola.Plugins.TempRefHelper
-
-  def irc_doc() do
-    """
-    # Image Generation
-
-    * **`!d2 [-n 1..10] [-g 256|512|1024] <prompt>`** generate image(s) using OpenAI Dall-E 2
-    * **`!sd [options] <prompt>`** generate image(s) using Stable Diffusion models (see below)
-
-    ## !sd
-
-    * `-m X` (sd2) model (sd2: Stable Diffusion v2, sd1: Stable Diffusion v1.5, any3: Anything v3, any4: Anything v4, oj: OpenJourney)
-    * `-w X, -h X` (512) width and height (128, 256, 384, 448, 512, 576, 640, 704, 768)
-    * `-n 1..10` (1) number of images to generate
-    * `-s X` (null) seed
-    * `-S 0..500` (50) denoising steps
-    * `-X X` (KLMS) scheduler (DDIM, K_EULER, DPMSolverMultistep, K_EULER_ANCESTRAL, PNDM, KLMS)
-    * `-g 1..20` (7.5) guidance scale
-    * `-P 0.0..1.0` (0.8) prompt strength
-    """
-  end
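These switches were parsed in two passes over the same argument list: `process_sd/4` below takes the shared `-n`/`-w`/`-h` flags, and `sd_model/4` re-parses for the model-specific ones, so flag order never matters. A quick sketch of the shared pass (argument list invented); leftover words become the prompt, and switches belonging to the other pass land in the invalid list:

    iex> OptionParser.parse(~w(-w 768 -n 2 a red panda),
    ...>   aliases: [n: :number, w: :width, h: :height],
    ...>   strict: [number: :integer, width: :integer, height: :integer])
    {[width: 768, number: 2], ["a", "red", "panda"], []}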
-
-  def start_link() do
-    GenServer.start_link(__MODULE__, [], name: __MODULE__)
-  end
-
-  defstruct [:temprefs]
-
-  def init(_) do
-    regopts = [plugin: __MODULE__]
-    {:ok, _} = Registry.register(Nola.PubSub, "trigger:d2", regopts)
-    {:ok, _} = Registry.register(Nola.PubSub, "trigger:sd", regopts)
-    {:ok, %__MODULE__{temprefs: new_temp_refs()}}
-  end
-
-  def handle_info({:irc, :trigger, "sd", msg = %Nola.Message{trigger: %Nola.Trigger{type: :bang, args: args}}}, state) do
-    {:noreply, case OptionParser.parse(args, aliases: [m: :model], strict: [model: :string]) do
-      {_, [], _} ->
-        msg.replyfun.("#{msg.sender.nick}: sd: missing prompt")
-        state
-      {opts, prompt, _} ->
-        process_sd(Keyword.get(opts, :model, "sd2"), Enum.join(prompt, " "), msg, state)
-    end}
-  end
-
-  def handle_info({:irc, :trigger, "d2", msg = %Nola.Message{trigger: %Nola.Trigger{type: :bang, args: args}}}, state) do
-    opts = OptionParser.parse(args,
-      aliases: [n: :n, g: :geometry],
-      strict: [n: :integer, geometry: :integer]
-    )
-    case opts do
-      {_opts, [], _} ->
-        msg.replyfun.("#{msg.sender.nick}: d2: missing prompt")
-        {:noreply, state}
-      {opts, prompts, _} ->
-        prompt = Enum.join(prompts, " ")
-        geom = Keyword.get(opts, :geometry, 256)
-        request = %{
-          "prompt" => prompt,
-          "n" => Keyword.get(opts, :n, 1),
-          "size" => "#{geom}x#{geom}",
-          "response_format" => "b64_json",
-          "user" => msg.account.id
-        }
-
-        id = FlakeId.get()
-
-        state = case OpenAi.post("/v1/images/generations", request) do
-          {:ok, %{"data" => data}} ->
-            urls = for {%{"b64_json" => b64}, idx} <- Enum.with_index(data) do
-              with {:ok, body} <- Base.decode64(b64),
-                   <<smol_body::binary-size(100), _::binary>> = body,
-                   {:ok, magic} <- GenMagic.Pool.perform(Nola.GenMagic, {:bytes, smol_body}),
-                   bucket = Application.get_env(:nola, :s3, []) |> Keyword.get(:bucket),
-                   s3path = "#{msg.account.id}/iD2#{id}#{idx}.png",
-                   s3req = ExAws.S3.put_object(bucket, s3path, body, acl: :public_read, content_type: magic.mime_type),
-                   {:ok, _} <- ExAws.request(s3req),
-                   path = NolaWeb.Router.Helpers.url(NolaWeb.Endpoint) <> "/files/#{s3path}" do
-                {:ok, path}
-              end
-            end
-
-            urls = for {:ok, path} <- urls, do: path
-            msg.replyfun.("#{msg.sender.nick}: #{Enum.join(urls, " ")}")
-            state
-          {:error, atom} when is_atom(atom) ->
-            Logger.error("dalle2: #{inspect atom}")
-            msg.replyfun.("#{msg.sender.nick}: dalle2: ☠️ #{to_string(atom)}")
-            state
-          error ->
-            Logger.error("dalle2: #{inspect error}")
-            msg.replyfun.("#{msg.sender.nick}: dalle2: ☠️")
-            state
-        end
-        {:noreply, state}
-    end
-  end
-
-  defp process_sd(model, prompt, msg, state) do
-    {general_opts, _, _} = OptionParser.parse(msg.trigger.args,
-      aliases: [n: :number, w: :width, h: :height],
-      strict: [number: :integer, width: :integer, height: :integer]
-    )
-
-    general_opts = general_opts
-                   |> Keyword.put_new(:number, 1)
-
-    case sd_model(model, prompt, general_opts, msg.trigger.args) do
-      {:ok, env} ->
-        base_url = "https://api.runpod.ai/v1/#{env.name}"
-        {headers, options} = runpod_headers(env, state)
-        result = with {:ok, json} <- Poison.encode(%{"input" => env.request}),
-                      {:ok, %HTTPoison.Response{status_code: 200, body: body}} <- HTTPoison.post("#{base_url}/run", json, headers, options),
-                      {:ok, %{"id" => id} = data} <- Poison.decode(body) do
-          Logger.debug("runpod: started job #{id}: #{inspect data}")
-          spawn(fn() -> runpod_result_loop("#{base_url}/status/#{id}", env, msg, state) end)
-          :ok
-        else
-          {:ok, %HTTPoison.Response{status_code: code}} -> {:error, Plug.Conn.Status.reason_atom(code)}
-          {:error, %HTTPoison.Error{reason: reason}} -> {:error, reason}
-        end
-
-        case result do
-          {:error, reason} ->
-            Logger.error("runpod: http error for #{base_url}/run: #{inspect reason}")
-            msg.replyfun.("#{msg.sender.nick}: sd: runpod failed: #{inspect reason}")
-          _ -> :ok
-        end
-      {:error, error} ->
-        msg.replyfun.("#{msg.sender.nick}: sd: #{error}")
-    end
-
-    state
-  end
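`process_sd/4` only submits the job; results arrive via the spawned `runpod_result_loop/4` below. The exchange is a plain submit-then-poll protocol over JSON. The shapes here are inferred from the pattern matches in this file; the field values and the non-terminal status names are assumptions:

    # POST https://api.runpod.ai/v1/<model>/run
    #   body: %{"input" => request}
    #   -> %{"id" => "abc123", "status" => "IN_QUEUE"}
    #
    # GET https://api.runpod.ai/v1/<model>/status/abc123   (polled every second)
    #   -> %{"status" => "IN_QUEUE"}                       # non-terminal: sleep, retry
    #   -> %{"status" => "FAILED", "error" => "..."}       # reported back to the channel
    #   -> %{"status" => "COMPLETED",
    #        "output" => [%{"image" => "https://...", "seed" => 1234}]}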
-
-  defp runpod_result_loop(url, env, msg, state) do
-    Logger.debug("runpod_result_loop: new")
-    {headers, options} = runpod_headers(env, state)
-    with {:ok, %HTTPoison.Response{status_code: 200, body: body}} <- HTTPoison.get(url, headers ++ [{"content-type", "application/json"}], options),
-         {:ok, %{"status" => "COMPLETED"} = data} <- Poison.decode(body) do
-      id = FlakeId.get()
-      tasks = for {%{"image" => url, "seed" => seed}, idx} <- Enum.with_index(Map.get(data, "output", [])) do
-        Task.async(fn() ->
-          with {:ok, %HTTPoison.Response{status_code: 200, body: body}} <- HTTPoison.get(url, [], options),
-               bucket = Application.get_env(:nola, :s3, []) |> Keyword.get(:bucket),
-               s3path = "#{msg.account.id}/iR#{env.nick}#{id}#{idx}-#{seed}.png",
-               s3req = ExAws.S3.put_object(bucket, s3path, body, acl: :public_read, content_type: "image/png"),
-               {:ok, _} <- ExAws.request(s3req),
-               path = NolaWeb.Router.Helpers.url(NolaWeb.Endpoint) <> "/files/#{s3path}" do
-            {:ok, path}
-          else
-            error ->
-              Logger.error("runpod_result: error while uploading #{url}: #{inspect error}")
-              {:error, error}
-          end
-        end)
-      end
-      |> Task.yield_many(5000)
-      |> Enum.map(fn {task, res} ->
-        res || Task.shutdown(task, :brutal_kill)
-      end)
-
-      results = for({:ok, {:ok, url}} <- tasks, do: url)
-
-      msg.replyfun.("#{msg.sender.nick}: #{Enum.join(results, " ")}")
-    else
-      {:ok, %{"status" => "FAILED"} = data} ->
-        Logger.error("runpod_result_loop: job FAILED: #{inspect data}")
-        msg.replyfun.("#{msg.sender.nick}: sd: job failed: #{Map.get(data, "error", "error")}")
-      {:ok, %{"status" => _} = data} ->
-        Logger.debug("runpod_result_loop: not completed: #{inspect data}")
-        :timer.sleep(:timer.seconds(1))
-        runpod_result_loop(url, env, msg, state)
-      {:ok, %HTTPoison.Response{status_code: 403}} ->
-        msg.replyfun.("#{msg.sender.nick}: sd: runpod failure: unauthorized")
-      error ->
-        Logger.warning("image: sd: runpod http error: #{inspect error}")
-        :timer.sleep(:timer.seconds(2))
-        runpod_result_loop(url, env, msg, state)
-    end
-  end
-
-  defp runpod_headers(_env, _state) do
-    config = Application.get_env(:nola, :runpod, [])
-    headers = [{"user-agent", "nola.lol bot, href@random.sh"},
-               {"authorization", "Bearer " <> Keyword.get(config, :key, "unset-api-key")}]
-    options = [timeout: :timer.seconds(180), recv_timeout: :timer.seconds(180)]
-    {headers, options}
-  end
-
-  defp sd_model(name, _, general_opts, opts) when name in ~w(sd2 sd1 oj any any4) do
-    {opts, prompt, _} = OptionParser.parse(opts, [
-      aliases: [P: :strength, s: :seed, S: :steps, g: :guidance, X: :scheduler, q: :negative],
-      strict: [strength: :float, steps: :integer, guidance: :float, scheduler: :string, seed: :integer, negative: :keep]
-    ])
-    opts = general_opts ++ opts
-    prompt = Enum.join(prompt, " ")
-
-    negative = case Keyword.get_values(opts, :negative) do
-      [] -> nil
-      list -> Enum.join(list, " ")
-    end
-
-    full_name = case name do
-      "sd2" -> "stable-diffusion-v2"
-      "sd1" -> "stable-diffusion-v1"
-      "oj" -> "sd-openjourney"
-      "any" -> "sd-anything-v3"
-      "any4" -> "sd-anything-v4"
-    end
-
-    default_scheduler = case name do
-      "sd2" -> "KLMS"
-      _ -> "K-LMS"
-    end
-
-    request = %{
-      "prompt" => prompt,
-      "num_outputs" => general_opts[:number],
-      "width" => opts[:width] || 512,
-      "height" => opts[:height] || 512,
-      "prompt_strength" => opts[:strength] || 0.8,
-      "num_inference_steps" => opts[:steps] || 30,
-      "guidance_scale" => opts[:guidance] || 7.5,
-      "scheduler" => opts[:scheduler] || default_scheduler,
-      "seed" => opts[:seed] || :rand.uniform(100_000_00)
-    }
-
-    request = if negative do
-      Map.put(request, "negative_prompt", negative)
-    else
-      request
-    end
-
-    {:ok, %{name: full_name, nick: name, request: request}}
-  end
-
-  defp sd_model(name, _, _, _) do
-    {:error, "unsupported model: \"#{name}\""}
-  end
-
-end
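Putting the two parsing passes together with an invented input: `!sd -g 9 -q blurry cute fox` selects the default `sd2` model (endpoint `stable-diffusion-v2`) and, under the defaults in `sd_model/4`, would submit roughly this request (seed is random when `-s` is absent):

    %{
      "prompt" => "cute fox",
      "negative_prompt" => "blurry",
      "num_outputs" => 1,
      "width" => 512,
      "height" => 512,
      "prompt_strength" => 0.8,
      "num_inference_steps" => 30,
      "guidance_scale" => 9.0,
      "scheduler" => "KLMS",
      "seed" => 4242
    }

Note that the code's steps default (30) differs from the documented one (50).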
- "sd2" -> "stable-diffusion-v2" - "sd1" -> "stable-diffusion-v1" - "oj" -> "sd-openjourney" - "any" -> "sd-anything-v3" - "any4" -> "sd-anything-v4" - end - - default_scheduler = case name do - "sd2" -> "KLMS" - _ -> "K-LMS" - end - - request = %{ - "prompt" => prompt, - "num_outputs" => general_opts[:number], - "width" => opts[:width] || 512, - "height" => opts[:height] || 512, - "prompt_strength" => opts[:strength] || 0.8, - "num_inference_steps" => opts[:steps] || 30, - "guidance_scale" => opts[:guidance] || 7.5, - "scheduler" => opts[:scheduler] || default_scheduler, - "seed" => opts[:seed] || :rand.uniform(100_000_00) - } - - request = if negative do - Map.put(request, "negative_prompt", negative) - else - request - end - - {:ok, %{name: full_name, nick: name, request: request}} - end - - defp sd_model(name, _, _, _) do - {:error, "unsupported model: \"#{name}\""} - end - -end diff --git a/lib/web/controllers/gpt_controller.ex b/lib/web/controllers/gpt_controller.ex deleted file mode 100644 index 810a875..0000000 --- a/lib/web/controllers/gpt_controller.ex +++ /dev/null @@ -1,33 +0,0 @@ -defmodule NolaWeb.GptController do - use NolaWeb, :controller - require Logger - - plug NolaWeb.ContextPlug - - def result(conn, params = %{"id" => result_id}) do - case Nola.Plugins.Gpt.get_result(result_id) do - {:ok, result} -> - network = Map.get(params, "network") - channel = if c = Map.get(params, "chan"), do: NolaWeb.reformat_chan(c) - render(conn, "result.html", network: network, channel: channel, result: result) - {:error, :not_found} -> - conn - |> put_status(404) - |> text("Page not found") - end - end - - def prompt(conn, params = %{"id" => prompt_id}) do - case Nola.Plugins.Gpt.get_prompt(prompt_id) do - {:ok, prompt} -> - network = Map.get(params, "network") - channel = if c = Map.get(params, "chan"), do: NolaWeb.reformat_chan(c) - render(conn, "prompt.html", network: network, channel: channel, prompt: prompt) - {:error, :not_found} -> - conn - |> put_status(404) - |> text("Page not found") - end - end - -end diff --git a/lib/web/router.ex b/lib/web/router.ex index 5658fda..fb0df63 100644 --- a/lib/web/router.ex +++ b/lib/web/router.ex @@ -1,85 +1,89 @@ defmodule NolaWeb.Router do use NolaWeb, :router pipeline :browser do plug :accepts, ["html", "txt"] plug :fetch_session plug :fetch_flash plug :fetch_live_flash plug :protect_from_forgery plug :put_secure_browser_headers plug :put_root_layout, {NolaWeb.LayoutView, :root} end pipeline :api do plug :accepts, ["json", "sse"] end pipeline :matrix_app_service do plug :accepts, ["json"] plug Nola.Matrix.Plug.Auth plug Nola.Matrix.Plug.SetConfig end scope "/api", NolaWeb do pipe_through :api get "/irc-auth.sse", IrcAuthSseController, :sse post "/sms/callback/Ovh", SmsController, :ovh_callback, as: :sms end scope "/", NolaWeb do pipe_through :browser get "/", PageController, :index get "/login/irc/:token", PageController, :token, as: :login get "/login/oidc", OpenIdController, :login get "/login/oidc/callback", OpenIdController, :callback get "/api/untappd/callback", UntappdController, :callback, as: :untappd_callback get "/-", IrcController, :index get "/-/txt", IrcController, :txt get "/-/txt/:name", IrcController, :txt - get "/-/gpt/prompt/:id", GptController, :task - get "/-/gpt/result/:id", GptController, :result + + get "/-/gpt", GptController, :index + get "/-/gpt/p/:id", GptController, :task + get "/-/gpt/r/:id", GptController, :result get "/-/alcoolog", AlcoologController, :index get "/-/alcoolog/~/:account_name", AlcoologController, 
diff --git a/lib/web/router.ex b/lib/web/router.ex
index 5658fda..fb0df63 100644
--- a/lib/web/router.ex
+++ b/lib/web/router.ex
@@ -1,85 +1,89 @@
 defmodule NolaWeb.Router do
   use NolaWeb, :router
 
   pipeline :browser do
     plug :accepts, ["html", "txt"]
     plug :fetch_session
     plug :fetch_flash
     plug :fetch_live_flash
     plug :protect_from_forgery
     plug :put_secure_browser_headers
     plug :put_root_layout, {NolaWeb.LayoutView, :root}
   end
 
   pipeline :api do
     plug :accepts, ["json", "sse"]
   end
 
   pipeline :matrix_app_service do
     plug :accepts, ["json"]
     plug Nola.Matrix.Plug.Auth
     plug Nola.Matrix.Plug.SetConfig
   end
 
   scope "/api", NolaWeb do
     pipe_through :api
     get "/irc-auth.sse", IrcAuthSseController, :sse
     post "/sms/callback/Ovh", SmsController, :ovh_callback, as: :sms
   end
 
   scope "/", NolaWeb do
     pipe_through :browser
     get "/", PageController, :index
     get "/login/irc/:token", PageController, :token, as: :login
     get "/login/oidc", OpenIdController, :login
     get "/login/oidc/callback", OpenIdController, :callback
     get "/api/untappd/callback", UntappdController, :callback, as: :untappd_callback
     get "/-", IrcController, :index
     get "/-/txt", IrcController, :txt
     get "/-/txt/:name", IrcController, :txt
-    get "/-/gpt/prompt/:id", GptController, :task
-    get "/-/gpt/result/:id", GptController, :result
+
+    get "/-/gpt", GptController, :index
+    get "/-/gpt/p/:id", GptController, :task
+    get "/-/gpt/r/:id", GptController, :result
     get "/-/alcoolog", AlcoologController, :index
     get "/-/alcoolog/~/:account_name", AlcoologController, :index
+    get "/:network", NetworkController, :index
+
     get "/:network/~:nick/alcoolog", AlcoologController, :nick
     get "/:network/~:nick/alcoolog/log.json", AlcoologController, :nick_log_json
     get "/:network/~:nick/alcoolog/gls.json", AlcoologController, :nick_gls_json
     get "/:network/~:nick/alcoolog/volumes.json", AlcoologController, :nick_volumes_json
     get "/:network/~:nick/alcoolog/history.json", AlcoologController, :nick_history_json
     get "/:network/~:nick/alcoolog/stats.json", AlcoologController, :nick_stats_json
+
     get "/:network/:chan/alcoolog", AlcoologController, :index
     get "/:network/:chan/alcoolog/gls.json", AlcoologController, :index_gls_json
-    get "/:network/:chan/gpt/prompt/:id", GptController, :task
-    get "/:network/:chan/gpt/result/:id", GptController, :result
+    put "/api/alcoolog/minisync/:user_id/meta/:key", AlcoologController, :minisync_put_meta
     get "/:network/:chan", IrcController, :index
     live "/:network/:chan/live", ChatLive
     get "/:network/:chan/txt", IrcController, :txt
     get "/:network/:chan/txt/:name", IrcController, :txt
     get "/:network/:channel/preums", IrcController, :preums
     get "/:network/:chan/alcoolog/t/:token", AlcoologController, :token
   end
 
   scope "/_matrix/:appservice", MatrixAppServiceWeb.V1, as: :matrix do
     pipe_through :matrix_app_service
 
     put "/transactions/:txn_id", TransactionController, :push
 
     get "/users/:user_id", UserController, :query
     get "/rooms/*room_alias", RoomController, :query
 
     get "/thirdparty/protocol/:protocol", ThirdPartyController, :query_protocol
     get "/thirdparty/user/:protocol", ThirdPartyController, :query_users
     get "/thirdparty/location/:protocol", ThirdPartyController, :query_locations
     get "/thirdparty/location", ThirdPartyController, :query_location_by_alias
     get "/thirdparty/user", ThirdPartyController, :query_user_by_id
   end
 end
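The net effect on the GPT pages: the prompt and result views move from `/-/gpt/prompt/:id` and `/-/gpt/result/:id` to the shorter `/-/gpt/p/:id` and `/-/gpt/r/:id`, the per-channel variants disappear, and `/-/gpt` gains an index. Assuming the standard Phoenix helper naming (the deleted plugin already called `gpt_url`), the new paths would resolve along these lines (ids invented):

    iex> NolaWeb.Router.Helpers.gpt_path(NolaWeb.Endpoint, :task, "haiku")
    "/-/gpt/p/haiku"
    iex> NolaWeb.Router.Helpers.gpt_path(NolaWeb.Endpoint, :result, "02uG7PvKkkbCpLK9")
    "/-/gpt/r/02uG7PvKkkbCpLK9"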