path: root/lib
author     rinpatch <rinpatch@sdf.org>  2020-03-14 15:39:58 +0300
committer  rinpatch <rinpatch@sdf.org>  2020-03-15 15:59:17 +0300
commit     e87a32bcd7ee7d6bb5e9b6882a8685b5ee4c180c (patch)
tree       73312030559c0f706028c09f05505189d77b3d29 /lib
parent     14ebf8f1e5411337d63d6372e6229da1b2f28316 (diff)
download   pleroma-e87a32bcd7ee7d6bb5e9b6882a8685b5ee4c180c.tar.gz
rip out fetch_initial_posts
Every time someone tries to use it, it goes mad and tries to scrape the entire fediverse for no visible reason. It's better to just remove it than to keep shipping it in its current state. Idea acked by lain and feld on IRC.

Closes #1595, #1422
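For instance admins, the only user-facing piece this touches is the :fetch_initial_posts config group, which the removed Pleroma.Config.get([:fetch_initial_posts, :enabled]) and Pleroma.Config.get!([:fetch_initial_posts, :pages]) calls read. A minimal sketch of the now-dead configuration block, assuming the standard Mix config layout (the values shown are illustrative, not defaults pulled from this commit):

    # In a Mix config file (e.g. config/prod.secret.exs); this block has no effect after this commit
    config :pleroma, :fetch_initial_posts,
      enabled: false,  # was read via Pleroma.Config.get([:fetch_initial_posts, :enabled])
      pages: 5         # was read via Pleroma.Config.get!([:fetch_initial_posts, :pages])

Any such block left in an instance's config can simply be deleted.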
Diffstat (limited to 'lib')
-rw-r--r--  lib/pleroma/user.ex                        32
-rw-r--r--  lib/pleroma/web/activity_pub/utils.ex      39
-rw-r--r--  lib/pleroma/workers/background_worker.ex    4
3 files changed, 1 insertion(+), 74 deletions(-)
diff --git a/lib/pleroma/user.ex b/lib/pleroma/user.ex
index 7531757f5..db510d957 100644
--- a/lib/pleroma/user.ex
+++ b/lib/pleroma/user.ex
@@ -839,10 +839,6 @@ defmodule Pleroma.User do
_e ->
with [_nick, _domain] <- String.split(nickname, "@"),
{:ok, user} <- fetch_by_nickname(nickname) do
- if Pleroma.Config.get([:fetch_initial_posts, :enabled]) do
- fetch_initial_posts(user)
- end
-
{:ok, user}
else
_e -> {:error, "not found " <> nickname}
@@ -850,11 +846,6 @@ defmodule Pleroma.User do
end
end
- @doc "Fetch some posts when the user has just been federated with"
- def fetch_initial_posts(user) do
- BackgroundWorker.enqueue("fetch_initial_posts", %{"user_id" => user.id})
- end
-
@spec get_followers_query(User.t(), pos_integer() | nil) :: Ecto.Query.t()
def get_followers_query(%User{} = user, nil) do
User.Query.build(%{followers: user, deactivated: false})
@@ -1320,16 +1311,6 @@ defmodule Pleroma.User do
Repo.delete(user)
end
- def perform(:fetch_initial_posts, %User{} = user) do
- pages = Pleroma.Config.get!([:fetch_initial_posts, :pages])
-
- # Insert all the posts in reverse order, so they're in the right order on the timeline
- user.source_data["outbox"]
- |> Utils.fetch_ordered_collection(pages)
- |> Enum.reverse()
- |> Enum.each(&Pleroma.Web.Federator.incoming_ap_doc/1)
- end
-
def perform(:deactivate_async, user, status), do: deactivate(user, status)
@spec perform(atom(), User.t(), list()) :: list() | {:error, any()}
@@ -1458,18 +1439,7 @@ defmodule Pleroma.User do
if !is_nil(user) and !needs_update?(user) do
{:ok, user}
else
- # Whether to fetch initial posts for the user (if it's a new user & the fetching is enabled)
- should_fetch_initial = is_nil(user) and Pleroma.Config.get([:fetch_initial_posts, :enabled])
-
- resp = fetch_by_ap_id(ap_id)
-
- if should_fetch_initial do
- with {:ok, %User{} = user} <- resp do
- fetch_initial_posts(user)
- end
- end
-
- resp
+ fetch_by_ap_id(ap_id)
end
end
diff --git a/lib/pleroma/web/activity_pub/utils.ex b/lib/pleroma/web/activity_pub/utils.ex
index 2bc958670..15dd2ed45 100644
--- a/lib/pleroma/web/activity_pub/utils.ex
+++ b/lib/pleroma/web/activity_pub/utils.ex
@@ -784,45 +784,6 @@ defmodule Pleroma.Web.ActivityPub.Utils do
defp build_flag_object(_), do: []
- @doc """
- Fetches the OrderedCollection/OrderedCollectionPage from `from`, limiting the amount of pages fetched after
- the first one to `pages_left` pages.
- If the amount of pages is higher than the collection has, it returns whatever was there.
- """
- def fetch_ordered_collection(from, pages_left, acc \\ []) do
- with {:ok, response} <- Tesla.get(from),
- {:ok, collection} <- Jason.decode(response.body) do
- case collection["type"] do
- "OrderedCollection" ->
- # If we've encountered the OrderedCollection and not the page,
- # just call the same function on the page address
- fetch_ordered_collection(collection["first"], pages_left)
-
- "OrderedCollectionPage" ->
- if pages_left > 0 do
- # There are still more pages
- if Map.has_key?(collection, "next") do
- # There are still more pages, go deeper saving what we have into the accumulator
- fetch_ordered_collection(
- collection["next"],
- pages_left - 1,
- acc ++ collection["orderedItems"]
- )
- else
- # No more pages left, just return whatever we already have
- acc ++ collection["orderedItems"]
- end
- else
- # Got the amount of pages needed, add them all to the accumulator
- acc ++ collection["orderedItems"]
- end
-
- _ ->
- {:error, "Not an OrderedCollection or OrderedCollectionPage"}
- end
- end
- end
-
#### Report-related helpers
def get_reports(params, page, page_size) do
params =
diff --git a/lib/pleroma/workers/background_worker.ex b/lib/pleroma/workers/background_worker.ex
index 598df6580..0f8ece2c4 100644
--- a/lib/pleroma/workers/background_worker.ex
+++ b/lib/pleroma/workers/background_worker.ex
@@ -10,10 +10,6 @@ defmodule Pleroma.Workers.BackgroundWorker do
use Pleroma.Workers.WorkerHelper, queue: "background"
@impl Oban.Worker
- def perform(%{"op" => "fetch_initial_posts", "user_id" => user_id}, _job) do
- user = User.get_cached_by_id(user_id)
- User.perform(:fetch_initial_posts, user)
- end
def perform(%{"op" => "deactivate_user", "user_id" => user_id, "status" => status}, _job) do
user = User.get_cached_by_id(user_id)