Mirror of https://git.pleroma.social/pleroma/pleroma.git
Synced 2025-04-24 05:47:41 -04:00

Merge branch 'develop' into 'webhooks_'

# Conflicts:
#   config/config.exs
#   lib/pleroma/application.ex

Commit 89526e3d9f · 415 changed files with 5302 additions and 1919 deletions
.gitignore (vendored) · 5 changes

@@ -6,7 +6,7 @@
 /test/instance
 /test/uploads
 /.elixir_ls
-/test/fixtures/DSCN0010_tmp.jpg
+/test/fixtures/DSCN0010_tmp*
 /test/fixtures/test_tmp.txt
 /test/fixtures/image_tmp.jpg
 /test/tmp/

@@ -60,3 +60,6 @@ pleroma.iml
 *~
 *#
 *.swp
+
+archive-*
+.gitlab-ci-local
.gitlab-ci.yml

@@ -1,4 +1,4 @@
-image: git.pleroma.social:5050/pleroma/pleroma/ci-base:elixir-1.13.4-otp-24
+image: git.pleroma.social:5050/pleroma/pleroma/ci-base:elixir-1.13.4-otp-25

 variables: &global_variables
   # Only used for the release

@@ -18,9 +18,7 @@ workflow:
     - if: $CI_COMMIT_BRANCH

 cache: &global_cache_policy
-  key:
-    files:
-      - mix.lock
+  key: $CI_JOB_IMAGE-$CI_COMMIT_SHORT_SHA
   paths:
     - deps
     - _build

@@ -72,7 +70,7 @@ check-changelog:
   tags:
     - amd64

-build-1.13.4:
+build-1.13.4-otp-25:
   extends:
     - .build_changes_policy
     - .using-ci-base

@@ -80,13 +78,12 @@ build-1.13.4:
   script:
     - mix compile --force

-build-1.15.7-otp-25:
+build-1.17.1-otp-26:
   extends:
     - .build_changes_policy
     - .using-ci-base
   stage: build
-  image: git.pleroma.social:5050/pleroma/pleroma/ci-base:elixir-1.15-otp25
-  allow_failure: true
+  image: git.pleroma.social:5050/pleroma/pleroma/ci-base:elixir-1.17.1-otp-26
   script:
     - mix compile --force

@@ -121,7 +118,7 @@ benchmark:
     - mix ecto.migrate
     - mix pleroma.load_testing

-unit-testing-1.12.3:
+unit-testing-1.13.4-otp-25:
   extends:
     - .build_changes_policy
     - .using-ci-base

@@ -136,7 +133,7 @@ unit-testing-1.12.3:
   script: &testing_script
     - mix ecto.create
     - mix ecto.migrate
-    - mix test --cover --preload-modules
+    - mix pleroma.test_runner --cover --preload-modules
   coverage: '/^Line total: ([^ ]*%)$/'
   artifacts:
     reports:

@@ -144,34 +141,19 @@ unit-testing-1.12.3:
       coverage_format: cobertura
       path: coverage.xml

-unit-testing-1.15.7-otp-25:
+unit-testing-1.17.1-otp-26:
   extends:
     - .build_changes_policy
     - .using-ci-base
   stage: test
-  image: git.pleroma.social:5050/pleroma/pleroma/ci-base:elixir-1.15-otp25
-  allow_failure: true
+  image: git.pleroma.social:5050/pleroma/pleroma/ci-base:elixir-1.17.1-otp-26
   cache: *testing_cache_policy
   services: *testing_services
   script: *testing_script

-unit-testing-1.12-erratic:
-  extends:
-    - .build_changes_policy
-    - .using-ci-base
-  stage: test
-  retry: 2
-  allow_failure: true
-  cache: *testing_cache_policy
-  services: *testing_services
-  script:
-    - mix ecto.create
-    - mix ecto.migrate
-    - mix test --only=erratic
-
-formatting-1.13:
+formatting-1.15:
   extends: .build_changes_policy
-  image: &formatting_elixir elixir:1.13-alpine
+  image: &formatting_elixir elixir:1.15-alpine
   stage: lint
   cache: *testing_cache_policy
   before_script: &current_bfr_script

@@ -183,7 +165,7 @@ formatting-1.13:
   script:
     - mix format --check-formatted

-cycles-1.13:
+cycles-1.15:
   extends: .build_changes_policy
   image: *formatting_elixir
   stage: lint
changelog.d/3280-fix-emoji-ids.fix · new file (+1)
+Fix Emoji object IDs not always being valid

changelog.d/3904.security · new file (+1)
+HTTP Security: By default, don't allow unsafe-eval. The setting needs to be changed to allow Flash emulation.

changelog.d/3907.skip · new empty file

changelog.d/4167-strip-gps-info-in-png.fix · new file (+1)
+Ensure that StripLocation actually removes everything resembling GPS data from PNGs

changelog.d/add-rbl-mrf.add · new file (+1)
+Add DNSRBL MRF

changelog.d/adminfe-logger.change · new file (+1)
+Elixir Logger configuration is no longer permitted through AdminFE and ConfigDB

changelog.d/akkoma-prune-options.add · new file (+1)
+Add options to the mix prune_objects task

changelog.d/anti-mentionspam-mrf.add · new file (+1)
+Add Anti-mention Spam MRF backported from Rebased

changelog.d/auth-fetch-exception.add · new file (+1)
+HTTPSignaturePlug: Add :authorized_fetch_mode_exceptions configuration

changelog.d/authorized-fetch-rejections.add · new file (+1)
+Add an option to reject certain domains when authorized fetch is enabled.

changelog.d/bandit_update_1.5.2.change · new file (+1)
+Update Bandit to 1.5.2

changelog.d/ci-cache.skip · new empty file
changelog.d/ci-elixir-1.16.skip · new empty file
changelog.d/ci-elixir-1.17.skip · new empty file
changelog.d/ci-erratic.skip · new empty file
changelog.d/ci-otp-update.skip · new empty file
changelog.d/cleanup.skip · new empty file

changelog.d/cowboy-stream-chunked.fix · new file (+1)
+Restore Cowboy's ability to stream MediaProxy responses without Chunked encoding.

changelog.d/debug-logs.skip · new empty file

changelog.d/deps-bump-2024-06-07.skip · new file (+2)
+Update dependencies held back due to old Elixir version
+

changelog.d/deps-poison-test-only.skip · new empty file

changelog.d/docs-netbsd-update.change · new file (+1)
+Update and extend NetBSD installation docs

changelog.d/elixir-1.15.fix · new file (+1)
+Elixir 1.15 compatibility

changelog.d/fix-mrfs.add · new file (+1)
+Added a Mix task "pleroma.config fix_mrf_policies" which will remove erroneous MRF policies from ConfigDB.

changelog.d/group-repeats.fix · new file (+1)
+Deactivated groups would still try to repeat a post.

changelog.d/gun-logs-debug.skip · new empty file

changelog.d/gun_pool4.fix · new file (+1)
+Gun Connection Pool was not retrying to acquire a connection if the pool was full and stale connections were reclaimed

changelog.d/ingestion-queue.skip · new empty file

changelog.d/ipfs-dialyzer.skip · new file (+1)
+no comment

changelog.d/ldap-error-logging.change · new file (+1)
+Improve error logging when LDAP authentication fails.

changelog.d/ldap.fix · new file (+1)
+Fix LDAP support

changelog.d/logger-metadata.add · new file (+1)
+Logger metadata is now attached to some logs to help with troubleshooting and analysis

changelog.d/mediaproxy-http.fix · new file (+1)
+Ensure MediaProxy HTTP requests obey all the defined connection settings

changelog.d/missing-fks.add · new file (+1)
+Add missing indexes on foreign key relationships

changelog.d/mix-indexer.add · new file (+1)
+Permit passing --chunk and --step values to the Pleroma.Search.Indexer Mix task

changelog.d/mrf-nsfw-otp25.skip · new file (+1)
+noop

changelog.d/notification-spex.skip · new empty file

changelog.d/oban-cancel.change · new file (+1)
+Changed some jobs to return :cancel on unrecoverable errors that should not be retried

changelog.d/oban-deprecated-discards.skip · new empty file

changelog.d/oban-fetcher-rejected.change · new file (+1)
+Discard Remote Fetcher jobs which errored due to an MRF rejection.

changelog.d/oban-live_dashboard.add · new file (+1)
+Oban jobs can now be viewed in the Live Dashboard

changelog.d/oban-queues.change · new file (+1)
+Oban queues have been refactored to simplify the queue design

changelog.d/oban-rich-media-errors.fix · new file (+1)
+Prevent Rich Media backfill jobs from retrying in cases where it is likely they will fail again.

changelog.d/oban-timeouts.change · new file (+1)
+Ensure all Oban jobs have timeouts defined

changelog.d/oban-user-refresh-unique.fix · new file (+1)
+Oban Jobs for refreshing users were not respecting the uniqueness setting

changelog.d/pinned-collection-fetch.security · new file (+1)
+Use proper workers for fetching pins instead of an ad-hoc task, fixing a potential fetch loop

changelog.d/pools.change · new file (+1)
+HTTP connection pool adjustments

changelog.d/prometheus-docs.change · new file (+1)
+Update the documentation for configuring Prometheus metrics.

changelog.d/promexdocs.add · new file (+1)
+PromEx documentation

changelog.d/qdrant_search.add · new file (+1)
+Add Qdrant/OpenAI embedding search

changelog.d/rich_media_backfill.change · new file (+1)
+Rich Media backfilling is now an Oban job

changelog.d/rich_media_config.skip · new empty file
changelog.d/rich_media_oban.skip · new empty file
changelog.d/rich_media_stream_test.skip · new empty file
changelog.d/spex-error-log.skip · new empty file

changelog.d/stream-end-poll.fix · new file (+1)
+End of poll notifications were not streamed over websockets or web push

changelog.d/support-honk-image-summaries.add · new file (+1)
+Support honk-style attachment summaries as alt-text.

changelog.d/user-refresh-rework.skip · new empty file

changelog.d/user-refresh.change · new file (+1)
+User profile refreshes are now asynchronous

changelog.d/video-thumbs.fix · new file (+1)
+Video thumbnails were not being generated due to a negative cache lookup logic error

changelog.d/web_push_actor_regression.skip · new empty file

changelog.d/webpush-polls.change · new file (+1)
+Render nice web push notifications for polls
@@ -1,4 +1,4 @@
-FROM elixir:1.13.4-otp-24
+FROM elixir:1.13.4-otp-25

 # Single RUN statement, otherwise intermediate images are created
 # https://docs.docker.com/develop/develop-images/dockerfile_best-practices/#run

@@ -1 +1 @@
-docker buildx build --platform linux/amd64,linux/arm64 -t git.pleroma.social:5050/pleroma/pleroma/ci-base:elixir-1.13.4-otp-24 --push .
+docker buildx build --platform linux/amd64,linux/arm64 -t git.pleroma.social:5050/pleroma/pleroma/ci-base:elixir-1.13.4-otp-25 --push .

@@ -1 +0,0 @@
-docker buildx build --platform linux/amd64,linux/arm64 -t git.pleroma.social:5050/pleroma/pleroma/ci-base:elixir-1.15-otp25 --push .

@@ -1,4 +1,4 @@
-FROM elixir:1.15.7-otp-25
+FROM elixir:1.15.8-otp-26

 # Single RUN statement, otherwise intermediate images are created
 # https://docs.docker.com/develop/develop-images/dockerfile_best-practices/#run

ci/elixir-1.15.8-otp-26/build_and_push.sh · new executable file (+1)
+docker buildx build --platform linux/amd64,linux/arm64 -t git.pleroma.social:5050/pleroma/pleroma/ci-base:elixir-1.15.8-otp-26 --push .

ci/elixir-1.16.3-otp-26/Dockerfile · new file (+8)
+FROM elixir:1.16.3-otp-26
+
+# Single RUN statement, otherwise intermediate images are created
+# https://docs.docker.com/develop/develop-images/dockerfile_best-practices/#run
+RUN apt-get update &&\
+    apt-get install -y libmagic-dev cmake libimage-exiftool-perl ffmpeg &&\
+    mix local.hex --force &&\
+    mix local.rebar --force

ci/elixir-1.16.3-otp-26/build_and_push.sh · new executable file (+1)
+docker buildx build --platform linux/amd64,linux/arm64 -t git.pleroma.social:5050/pleroma/pleroma/ci-base:elixir-1.16.3-otp-26 --push .

ci/elixir-1.17.1-otp-26/Dockerfile · new file (+8)
+FROM elixir:1.17.1-otp-26
+
+# Single RUN statement, otherwise intermediate images are created
+# https://docs.docker.com/develop/develop-images/dockerfile_best-practices/#run
+RUN apt-get update &&\
+    apt-get install -y libmagic-dev cmake libimage-exiftool-perl ffmpeg &&\
+    mix local.hex --force &&\
+    mix local.rebar --force

ci/elixir-1.17.1-otp-26/build_and_push.sh · new executable file (+1)
+docker buildx build --platform linux/amd64,linux/arm64 -t git.pleroma.social:5050/pleroma/pleroma/ci-base:elixir-1.17.1-otp-26 --push .
config/config.exs

@@ -83,8 +83,8 @@ config :ex_aws, :s3,
   scheme: "https://"

 config :pleroma, Pleroma.Uploaders.IPFS,
-  post_gateway_url: nil,
-  get_gateway_url: nil
+  post_gateway_url: "http://localhost:5001",
+  get_gateway_url: "http://localhost:8080"

 config :pleroma, :emoji,
   shortcode_globs: ["/emoji/custom/**/*.png"],

@@ -132,16 +133,18 @@ config :pleroma, Pleroma.Web.Endpoint,
 ]

 # Configures Elixir's Logger
 config :logger, backends: [:console]

 config :logger, :console,
   level: :debug,
   format: "\n$time $metadata[$level] $message\n",
-  metadata: [:request_id]
+  metadata: [:actor, :path, :type, :user]

 config :logger, :ex_syslogger,
   level: :debug,
   ident: "pleroma",
   format: "$metadata[$level] $message",
-  metadata: [:request_id]
+  metadata: [:actor, :path, :type, :user]

 config :mime, :types, %{
   "application/xml" => ["xml"],

@@ -192,6 +194,7 @@ config :pleroma, :instance,
   allow_relay: true,
   public: true,
   quarantined_instances: [],
+  rejected_instances: [],
   static_dir: "instance/static/",
   allowed_post_formats: [
     "text/plain",

@@ -410,6 +413,11 @@ config :pleroma, :mrf_vocabulary,
   accept: [],
   reject: []

+config :pleroma, :mrf_dnsrbl,
+  nameserver: "127.0.0.1",
+  port: 53,
+  zone: "bl.pleroma.com"
+
 # threshold of 7 days
 config :pleroma, :mrf_object_age,
   threshold: 604_800,

@@ -430,6 +438,8 @@ config :pleroma, :mrf_force_mention,
   mention_parent: true,
   mention_quoted: true

+config :pleroma, :mrf_antimentionspam, user_age_limit: 30_000
+
 config :pleroma, :rich_media,
   enabled: true,
   ignore_hosts: [],

@@ -512,7 +522,8 @@ config :pleroma, :http_security,
   sts: false,
   sts_max_age: 31_536_000,
   ct_max_age: 2_592_000,
-  referrer_policy: "same-origin"
+  referrer_policy: "same-origin",
+  allow_unsafe_eval: false

 config :cors_plug,
   max_age: 86_400,

@@ -574,24 +585,13 @@ config :pleroma, Oban,
   log: false,
   queues: [
     activity_expiration: 10,
-    token_expiration: 5,
-    filter_expiration: 1,
-    backup: 1,
     federator_incoming: 5,
     federator_outgoing: 5,
     ingestion_queue: 50,
     web_push: 50,
-    mailer: 10,
-    transmogrifier: 20,
-    scheduled_activities: 10,
-    poll_notifications: 10,
-    background: 5,
-    remote_fetcher: 2,
-    attachments_cleanup: 1,
-    new_users_digest: 1,
-    mute_expire: 5,
+    background: 20,
     search_indexing: [limit: 10, paused: true],
-    rich_media_expiration: 2
+    slow: 5
   ],
   plugins: [Oban.Plugins.Pruner],
   crontab: [

@@ -829,22 +829,27 @@ config :pleroma, :connections_pool,

 config :pleroma, :pools,
   federation: [
-    size: 50,
-    max_waiting: 10,
+    size: 75,
+    max_waiting: 20,
     recv_timeout: 10_000
   ],
   media: [
-    size: 50,
+    size: 75,
     max_waiting: 20,
     recv_timeout: 15_000
   ],
+  rich_media: [
+    size: 25,
+    max_waiting: 20,
+    recv_timeout: 15_000
+  ],
   upload: [
     size: 25,
-    max_waiting: 5,
+    max_waiting: 20,
     recv_timeout: 15_000
   ],
   default: [
-    size: 10,
+    size: 50,
     max_waiting: 2,
     recv_timeout: 5_000
   ]

@@ -858,6 +863,10 @@ config :pleroma, :hackney_pools,
     max_connections: 50,
     timeout: 150_000
   ],
+  rich_media: [
+    max_connections: 50,
+    timeout: 150_000
+  ],
   upload: [
     max_connections: 25,
     timeout: 300_000

@@ -903,8 +912,6 @@ config :pleroma, Pleroma.User.Backup,
   process_chunk_size: 100

 config :pleroma, ConcurrentLimiter, [
-  {Pleroma.Web.RichMedia.Helpers, [max_running: 5, max_waiting: 5]},
   {Pleroma.Web.ActivityPub.MRF.MediaProxyWarmingPolicy, [max_running: 5, max_waiting: 5]},
-  {Pleroma.Search, [max_running: 30, max_waiting: 50]},
   {Pleroma.Webhook.Notify, [max_running: 5, max_waiting: 200]}
 ]

@@ -927,6 +934,19 @@ config :pleroma, Pleroma.Application,

 config :pleroma, Pleroma.Uploaders.Uploader, timeout: 30_000

+config :pleroma, Pleroma.Search.QdrantSearch,
+  qdrant_url: "http://127.0.0.1:6333/",
+  qdrant_api_key: "",
+  openai_url: "http://127.0.0.1:11345",
+  # The healthcheck url has to be set to nil when used with the real openai
+  # API, as it doesn't have a healthcheck endpoint.
+  openai_healthcheck_url: "http://127.0.0.1:11345/health",
+  openai_model: "snowflake/snowflake-arctic-embed-xs",
+  openai_api_key: "",
+  qdrant_index_configuration: %{
+    vectors: %{size: 384, distance: "Cosine"}
+  }
+
 # Import environment specific config. This must remain at the bottom
 # of this file so it overrides the configuration defined above.
 import_config "#{Mix.env()}.exs"
config/description.exs

@@ -774,6 +774,18 @@ config :pleroma, :config_description, [
         {"*.quarantined.com", "Reason"}
       ]
     },
+    %{
+      key: :rejected_instances,
+      type: {:list, :tuple},
+      key_placeholder: "instance",
+      value_placeholder: "reason",
+      description:
+        "List of ActivityPub instances to reject requests from if authorized_fetch_mode is enabled",
+      suggestions: [
+        {"rejected.com", "Reason"},
+        {"*.rejected.com", "Reason"}
+      ]
+    },
     %{
       key: :static_dir,
       type: :string,

@@ -1216,79 +1228,6 @@ config :pleroma, :config_description, [
       }
     ]
   },
-  %{
-    group: :logger,
-    type: :group,
-    description: "Logger-related settings",
-    children: [
-      %{
-        key: :backends,
-        type: [:atom, :tuple, :module],
-        description:
-          "Where logs will be sent, :console - send logs to stdout, { ExSyslogger, :ex_syslogger } - to syslog, Quack.Logger - to Slack.",
-        suggestions: [:console, {ExSyslogger, :ex_syslogger}]
-      }
-    ]
-  },
-  %{
-    group: :logger,
-    type: :group,
-    key: :ex_syslogger,
-    label: "ExSyslogger",
-    description: "ExSyslogger-related settings",
-    children: [
-      %{
-        key: :level,
-        type: {:dropdown, :atom},
-        description: "Log level",
-        suggestions: [:debug, :info, :warning, :error]
-      },
-      %{
-        key: :ident,
-        type: :string,
-        description:
-          "A string that's prepended to every message, and is typically set to the app name",
-        suggestions: ["pleroma"]
-      },
-      %{
-        key: :format,
-        type: :string,
-        description: "Default: \"$date $time [$level] $levelpad$node $metadata $message\"",
-        suggestions: ["$metadata[$level] $message"]
-      },
-      %{
-        key: :metadata,
-        type: {:list, :atom},
-        suggestions: [:request_id]
-      }
-    ]
-  },
-  %{
-    group: :logger,
-    type: :group,
-    key: :console,
-    label: "Console Logger",
-    description: "Console logger settings",
-    children: [
-      %{
-        key: :level,
-        type: {:dropdown, :atom},
-        description: "Log level",
-        suggestions: [:debug, :info, :warning, :error]
-      },
-      %{
-        key: :format,
-        type: :string,
-        description: "Default: \"$date $time [$level] $levelpad$node $metadata $message\"",
-        suggestions: ["$metadata[$level] $message"]
-      },
-      %{
-        key: :metadata,
-        type: {:list, :atom},
-        suggestions: [:request_id]
-      }
-    ]
-  },
   %{
     group: :pleroma,
     key: :frontend_configurations,

@@ -1816,6 +1755,12 @@ config :pleroma, :config_description, [
     type: :boolean,
     description: "Require HTTP signatures for AP fetches"
   },
+  %{
+    key: :authorized_fetch_mode_exceptions,
+    type: {:list, :string},
+    description:
+      "List of IPs (CIDR format accepted) to exempt from HTTP Signatures requirement (for example to allow debugging, you shouldn't otherwise need this)"
+  },
   %{
     key: :note_replies_output_limit,
     type: :integer,
config/dev.exs

@@ -35,8 +35,8 @@ config :pleroma, Pleroma.Emails.Mailer, adapter: Swoosh.Adapters.Local
 # configured to run both http and https servers on
 # different ports.

-# Do not include metadata nor timestamps in development logs
-config :logger, :console, format: "[$level] $message\n"
+# Do not include timestamps in development logs
+config :logger, Logger.Backends.Console, format: "$metadata[$level] $message\n"

 # Set a higher stacktrace during development. Avoid configuring such
 # in production as building large stacktraces may be expensive.
config/prod.exs

@@ -20,6 +20,7 @@ config :pleroma, Pleroma.Web.Endpoint,
 config :phoenix, serve_endpoints: true

 # Do not print debug messages in production
+config :logger, Logger.Backends.Console, level: :info
 config :logger, :console, level: :info
 config :logger, :ex_syslogger, level: :info
config/test.exs

@@ -49,7 +49,8 @@ config :pleroma, Pleroma.Repo,
   hostname: System.get_env("DB_HOST") || "localhost",
   port: System.get_env("DB_PORT") || "5432",
   pool: Ecto.Adapters.SQL.Sandbox,
-  pool_size: System.schedulers_online() * 2
+  pool_size: System.schedulers_online() * 2,
+  log: false

 config :pleroma, :dangerzone, override_repo_pool_size: true

@@ -154,6 +155,11 @@ config :pleroma, Pleroma.Upload, config_impl: Pleroma.UnstubbedConfigMock
 config :pleroma, Pleroma.ScheduledActivity, config_impl: Pleroma.UnstubbedConfigMock
 config :pleroma, Pleroma.Web.RichMedia.Helpers, config_impl: Pleroma.StaticStubbedConfigMock
+config :pleroma, Pleroma.Uploaders.IPFS, config_impl: Pleroma.UnstubbedConfigMock
+config :pleroma, Pleroma.Web.Plugs.HTTPSecurityPlug, config_impl: Pleroma.StaticStubbedConfigMock
+config :pleroma, Pleroma.Web.Plugs.HTTPSignaturePlug, config_impl: Pleroma.StaticStubbedConfigMock
+
+config :pleroma, Pleroma.Web.Plugs.HTTPSignaturePlug,
+  http_signatures_impl: Pleroma.StubbedHTTPSignaturesMock

 peer_module =
   if String.to_integer(System.otp_release()) >= 25 do

@@ -172,11 +178,14 @@ config :pleroma, Pleroma.Application,
   streamer_registry: false,
   test_http_pools: true

+config :pleroma, Pleroma.Web.Streaming, sync_streaming: true
+
 config :pleroma, Pleroma.Uploaders.Uploader, timeout: 1_000

 config :pleroma, Pleroma.Emoji.Loader, test_emoji: true

-config :pleroma, Pleroma.Web.RichMedia.Backfill, provider: Pleroma.Web.RichMedia.Backfill
+config :pleroma, Pleroma.Web.RichMedia.Backfill,
+  stream_out: Pleroma.Web.ActivityPub.ActivityPubMock

 if File.exists?("./config/test.secret.exs") do
   import_config "test.secret.exs"
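The `config_impl:` lines added above follow a dependency-injection pattern: modules read configuration through a swappable module so tests can substitute a stub such as `Pleroma.StaticStubbedConfigMock`. A hedged sketch of the idea — the module and config key below are illustrative, not Pleroma's actual internals:

```elixir
# Illustrative only: read config through a swappable implementation module,
# defaulting to Pleroma.Config, so tests can configure a mock instead.
defmodule MyFeature do
  @config_impl Application.compile_env(:pleroma, [__MODULE__, :config_impl], Pleroma.Config)

  def enabled? do
    @config_impl.get([:my_feature, :enabled], false)
  end
end
```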
docs/administration/CLI_tasks/config.md

@@ -154,4 +154,19 @@ This forcibly removes all saved values in the database.

 ```sh
 mix pleroma.config [--force] reset
 ```
+
+## Remove invalid MRF modules from the database
+
+This forcibly removes any enabled MRF that does not exist and will fix the ability of the instance to start.
+
+=== "OTP"
+    ```sh
+    ./bin/pleroma_ctl config fix_mrf_policies
+    ```
+
+=== "From Source"
+    ```sh
+    mix pleroma.config fix_mrf_policies
+    ```
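The validity check behind this task (its full implementation appears in `lib/mix/tasks/pleroma/config.ex` later in this diff) keeps a policy only if its module can still be found and compiled. A minimal sketch; `MyDefunctPolicy` is a hypothetical stand-in for an MRF module that no longer exists:

```elixir
# Keep only MRF policies whose modules still resolve; the defunct one is dropped.
policies = [Pleroma.Web.ActivityPub.MRF.NoOpPolicy, MyDefunctPolicy]

valid_policies =
  Enum.filter(policies, fn mrf ->
    match?({:module, _}, Code.ensure_compiled(mrf))
  end)
```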
docs/administration/CLI_tasks/database.md

@@ -21,16 +21,18 @@ Replaces embedded objects with references to them in the `objects` table. Only n
 mix pleroma.database remove_embedded_objects [option ...]
 ```

 ### Options
 - `--vacuum` - run `VACUUM FULL` after the embedded objects are replaced with their references

 ## Prune old remote posts from the database

-This will prune remote posts older than 90 days (configurable with [`config :pleroma, :instance, remote_post_retention_days`](../../configuration/cheatsheet.md#instance)) from the database, they will be refetched from source when accessed.
+This will prune remote posts older than 90 days (configurable with [`config :pleroma, :instance, remote_post_retention_days`](../../configuration/cheatsheet.md#instance)) from the database. Pruned posts may be refetched in some cases.
+
+!!! note
+    The disk space will only be reclaimed after a proper vacuum. By default PostgreSQL does this for you on a regular basis, but if your instance has been running for a long time and there are many rows deleted, it may be advantageous to use `VACUUM FULL` (e.g. by using the `--vacuum` option).

 !!! danger
-    The disk space will only be reclaimed after `VACUUM FULL`. You may run out of disk space during the execution of the task or vacuuming if you don't have about 1/3rds of the database size free.
+    You may run out of disk space during the execution of the task or vacuuming if you don't have about 1/3rds of the database size free. Vacuum causes a substantial increase in I/O traffic, and may lead to a degraded experience while it is running.

 === "OTP"

@@ -45,7 +47,11 @@ This will prune remote posts older than 90 days (configurable with [`config :ple
 ```

 ### Options
-- `--vacuum` - run `VACUUM FULL` after the objects are pruned
+- `--keep-threads` - Don't prune posts when they are part of a thread where at least one post has seen local interaction (e.g. one of the posts is a local post, or is favourited by a local user, or has been repeated by a local user...). It also won't delete posts when at least one of the posts in that thread is kept (e.g. because one of the posts has seen recent activity).
+- `--keep-non-public` - Keep non-public posts like DMs and followers-only, even if they are remote.
+- `--prune-orphaned-activities` - Also prune orphaned activities afterwards. Activities are things like Like, Create, Announce, Flag (aka reports). They can significantly help reduce the database size. Note: this can take a very long time.
+- `--vacuum` - Run `VACUUM FULL` after the objects are pruned. This should not be used on a regular basis, but is useful if your instance has been running for a long time before pruning.
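These options compose: for example, a conservative prune that keeps whole conversations and non-public posts intact would be `mix pleroma.database prune_objects --keep-threads --keep-non-public` (a hypothetical invocation built from the flags documented above).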
 ## Create a conversation for all existing DMs

@@ -93,6 +99,9 @@ Can be safely re-run

 ## Vacuum the database

+!!! note
+    By default PostgreSQL has an autovacuum daemon running. While the tasks described here can help in some cases, they shouldn't be needed on a regular basis. See [the Postgresql docs on vacuuming](https://www.postgresql.org/docs/current/sql-vacuum.html) for more information on this.
+
 ### Analyze

 Running an `analyze` vacuum job can improve performance by updating statistics used by the query planner. **It is safe to cancel this.**
docs/configuration/cheatsheet.md

@@ -41,6 +41,7 @@ To add configuration to your config file, you can copy it from the base config.
 * `allow_relay`: Permits remote instances to subscribe to all public posts of your instance. This may increase the visibility of your instance.
 * `public`: Makes the client API in authenticated mode-only except for user-profiles. Useful for disabling the Local Timeline and The Whole Known Network. Note that there is a dependent setting restricting or allowing unauthenticated access to specific resources, see `restrict_unauthenticated` for more details.
 * `quarantined_instances`: ActivityPub instances where private (DMs, followers-only) activities will not be sent.
+* `rejected_instances`: ActivityPub instances to reject requests from if authorized_fetch_mode is enabled.
 * `allowed_post_formats`: MIME-type list of formats allowed to be posted (transformed into HTML).
 * `extended_nickname_format`: Set to `true` to use extended local nicknames format (allows underscores/dashes). This will break federation with
 older software for these nicknames.

@@ -284,6 +285,7 @@ Notes:
 * `deny_follow_blocked`: Whether to disallow following an account that has blocked the user in question
 * `sign_object_fetches`: Sign object fetches with HTTP signatures
 * `authorized_fetch_mode`: Require HTTP signatures for AP fetches
+* `authorized_fetch_mode_exceptions`: List of IPs (CIDR format accepted) to exempt from HTTP Signatures requirement (for example to allow debugging, you shouldn't otherwise need this)
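A hedged sketch of how these two settings might be combined — the `:activitypub` config group is the section this list documents, while the CIDR value is purely illustrative:

```elixir
# Sketch: require signed fetches, but exempt an internal monitoring subnet.
config :pleroma, :activitypub,
  authorized_fetch_mode: true,
  authorized_fetch_mode_exceptions: ["10.0.0.0/8"]
```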
 ## Pleroma.User

@@ -472,6 +474,7 @@ This will make Pleroma listen on `127.0.0.1` port `8080` and generate urls start
 * ``ct_max_age``: The maximum age for the `Expect-CT` header if sent.
 * ``referrer_policy``: The referrer policy to use, either `"same-origin"` or `"no-referrer"`.
 * ``report_uri``: Adds the specified url to `report-uri` and `report-to` group in CSP header.
+* `allow_unsafe_eval`: Adds `wasm-unsafe-eval` to the CSP header. Needed for some non-essential frontend features like Flash emulation.

 ### Pleroma.Web.Plugs.RemoteIp

@@ -850,7 +853,7 @@ config :logger,
   backends: [{ExSyslogger, :ex_syslogger}]

 config :logger, :ex_syslogger,
-  level: :warn
+  level: :warning
 ```

 Another example, keeping console output and adding the pid to syslog output:

@@ -859,7 +862,7 @@ config :logger,
   backends: [:console, {ExSyslogger, :ex_syslogger}]

 config :logger, :ex_syslogger,
-  level: :warn,
+  level: :warning,
   option: [:pid, :ndelay]
 ```
docs/configuration/search.md

@@ -10,6 +10,30 @@ To use built-in search that has no external dependencies, set the search module
 While it has no external dependencies, it has problems with performance and relevancy.

+## QdrantSearch
+
+This uses the vector search engine [Qdrant](https://qdrant.tech) to search the posts in a vector space. It needs a way to generate embeddings and uses the [OpenAI API](https://platform.openai.com/docs/guides/embeddings/what-are-embeddings). This is implemented by several projects besides OpenAI itself, including the Python-based fastembed-server found in `supplemental/search/fastembed-api`.
+
+The default settings will support a setup where both the fastembed server and Qdrant run on the same system as Pleroma. To use it, set the search provider and run the fastembed server; see the README in `supplemental/search/fastembed-api`:
+
+> config :pleroma, Pleroma.Search, module: Pleroma.Search.QdrantSearch
+
+Then start the Qdrant server; see [here](https://qdrant.tech/documentation/quick-start/) for instructions.
+
+You will also need to create the Qdrant index once by running `mix pleroma.search.indexer create_index`. Running `mix pleroma.search.indexer index` will retroactively index the last 100_000 activities.
+
+### Indexing and model options
+
+To see the available configuration options, check out the QdrantSearch section in `config/config.exs`.
+
+The default indexing options work for the default model (`snowflake-arctic-embed-xs`). To optimize for a low memory footprint, adjust the index configuration as described in the [Qdrant docs](https://qdrant.tech/documentation/guides/optimize/). See also [this blog post](https://qdrant.tech/articles/memory-consumption/) that goes into detail.
+
+Different embedding models will need different vector size settings. You can see a list of the models supported by the fastembed server [here](https://qdrant.github.io/fastembed/examples/Supported_Models), including their vector dimensions. These vector dimensions need to be set in the `qdrant_index_configuration`.
+
+E.g. if you want to use `sentence-transformers/all-MiniLM-L6-v2` as a model, you will not need to adjust anything, because it and `snowflake-arctic-embed-xs` are both 384-dimensional models. If you want to use `snowflake/snowflake-arctic-embed-l`, you will need to adjust the `size` parameter in the `qdrant_index_configuration` to 1024, as it has a dimension of 1024.
+
+When using a different model, you will need to drop the index and recreate it (`mix pleroma.search.indexer drop_index` and `mix pleroma.search.indexer create_index`), as the different embeddings are not compatible with each other.
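Concretely, switching to the larger model from the paragraph above means overriding just the model name and vector size — a sketch based on the `Pleroma.Search.QdrantSearch` defaults added in `config/config.exs` in this same changeset:

```elixir
# Sketch: 1024-dimensional snowflake-arctic-embed-l instead of the 384-dim default.
# Drop and recreate the Qdrant index after changing the model.
config :pleroma, Pleroma.Search.QdrantSearch,
  openai_model: "snowflake/snowflake-arctic-embed-l",
  qdrant_index_configuration: %{
    vectors: %{size: 1024, distance: "Cosine"}
  }
```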

 ## Meilisearch

 Note that it's quite a bit more memory hungry than PostgreSQL (around 4-5G for ~1.2 million
docs/API/prometheus.md

@@ -1,44 +1,47 @@
-# Prometheus Metrics
+# Prometheus / OpenTelemetry Metrics

-Pleroma includes support for exporting metrics via the [prometheus_ex](https://github.com/deadtrickster/prometheus.ex) library.
+Pleroma includes support for exporting metrics via the [prom_ex](https://github.com/akoutmos/prom_ex) library.
+The metrics are exposed by a dedicated webserver/port to improve privacy and security.

 Config example:

 ```
-config :prometheus, Pleroma.Web.Endpoint.MetricsExporter,
-  enabled: true,
-  auth: {:basic, "myusername", "mypassword"},
-  ip_whitelist: ["127.0.0.1"],
-  path: "/api/pleroma/app_metrics",
-  format: :text
-```
-
-* `enabled` (Pleroma extension) enables the endpoint
-* `ip_whitelist` (Pleroma extension) could be used to restrict access only to specified IPs
-* `auth` sets the authentication (`false` for no auth; configurable to HTTP Basic Auth, see [prometheus-plugs](https://github.com/deadtrickster/prometheus-plugs#exporting) documentation)
-* `format` sets the output format (`:text` or `:protobuf`)
-* `path` sets the path to app metrics page
-
-## `/api/pleroma/app_metrics`
-
-### Exports Prometheus application metrics
-
-* Method: `GET`
-* Authentication: not required by default (see configuration options above)
-* Params: none
-* Response: text
-
-## Grafana
-
-### Config example
-
-The following is a config example to use with [Grafana](https://grafana.com)
-
+config :pleroma, Pleroma.PromEx,
+  disabled: false,
+  manual_metrics_start_delay: :no_delay,
+  drop_metrics_groups: [],
+  grafana: [
+    host: System.get_env("GRAFANA_HOST", "http://localhost:3000"),
+    auth_token: System.get_env("GRAFANA_TOKEN"),
+    upload_dashboards_on_start: false,
+    folder_name: "BEAM",
+    annotate_app_lifecycle: true
+  ],
+  metrics_server: [
+    port: 4021,
+    path: "/metrics",
+    protocol: :http,
+    pool_size: 5,
+    cowboy_opts: [],
+    auth_strategy: :none
+  ],
+  datasource: "Prometheus"
 ```
+
+PromEx supports the ability to automatically publish dashboards to your Grafana server as well as register Annotations. If you do not wish to configure this capability you must generate the dashboard JSON files and import them directly. You can find the mix commands in the upstream [documentation](https://hexdocs.pm/prom_ex/Mix.Tasks.PromEx.Dashboard.Export.html). You can find the list of modules enabled in Pleroma for which you should generate dashboards for by examining the contents of the `lib/pleroma/prom_ex.ex` module.
+
+## prometheus.yml
+
+The following is a bare minimum config example to use with [Prometheus](https://prometheus.io) or Prometheus-compatible software like [VictoriaMetrics](https://victoriametrics.com).
+
+```
 global:
   scrape_interval: 15s

 scrape_configs:
-  - job_name: 'beam'
-    metrics_path: /api/pleroma/app_metrics
-    scheme: https
+  - job_name: 'pleroma'
+    scheme: http
     static_configs:
-      - targets: ['pleroma.soykaf.com']
+      - targets: ['pleroma.soykaf.com:4021']
 ```
docs/installation/generic_dependencies.include

@@ -1,7 +1,7 @@
 ## Required dependencies

 * PostgreSQL >=11.0
-* Elixir >=1.13.0 <1.15
+* Elixir >=1.13.0 <1.17
 * Erlang OTP >=22.2.0 (supported: <27)
 * git
 * file / libmagic
docs/installation/netbsd_en.md

@@ -2,14 +2,41 @@

 {! backend/installation/generic_dependencies.include !}

-## Installing software used in this guide
+# Installation options
+
+Currently there are two options available for NetBSD: manual installation (from source) or using an experimental package from [pkgsrc-wip](https://github.com/NetBSD/pkgsrc-wip/tree/master/pleroma).
+
+The WIP package can be installed via pkgsrc and can be cross-compiled for easier binary distribution. Source installation will most probably be restricted to a single machine.
+
+## pkgsrc installation
+
+The WIP package creates a Mix.Release (similar to how Docker images are built) but doesn't bundle the Erlang runtime, listing it as a dependency instead. This allows for easier and more modular installations, especially on weaker machines. Currently this method also does not support all features of the `pleroma_ctl` command (like changing the installation type or managing frontends), as NetBSD is not yet a supported binary flavour of Pleroma's CI.
+
+In any case, you can install it the same way as any other `pkgsrc-wip` package:
+
+```
+cd /usr/pkgsrc
+git clone --depth 1 git://wip.pkgsrc.org/pkgsrc-wip.git wip
+cp -rf wip/pleroma www
+cp -rf wip/libvips graphics
+cd /usr/pkgsrc/www/pleroma
+bmake && bmake install
+```
+
+Use `bmake package` to create a binary package. This can come in especially handy if you're targeting embedded or low-power systems and are cross-compiling on a more powerful machine.
+
+> Note: Elixir has an [endianness bug](https://github.com/elixir-lang/elixir/issues/2785) which requires it to be compiled on a machine with the same endianness. In other words, a package cross-compiled on amd64 (little endian) won't work on powerpc or sparc machines (big endian). While _in theory™_ nothing catastrophic should happen, one can see that, for example, regexes won't work properly. Some distributions just strip this warning away, so it doesn't bother the users... anyway, you've been warned.
+
+## Source installation
+
 pkgin should have been installed by the NetBSD installer if you selected
-the right options. If it isn't installed, install it using pkg_add.
+the right options. If it isn't installed, install it using `pkg_add`.

 Note that `postgresql11-contrib` is needed for the Postgres extensions
 Pleroma uses.

+> Note: you can use modern versions of PostgreSQL. In this case, just use `postgresql16-contrib` and so on.
+
 The `mksh` shell is needed to run the Elixir `mix` script.

 `# pkgin install acmesh elixir git-base git-docs mksh nginx postgresql11-server postgresql11-client postgresql11-contrib sudo ffmpeg4 ImageMagick`

@@ -29,29 +56,6 @@ shells/mksh
 www/nginx
 ```

-Copy the rc.d scripts to the right directory:
-
-```
-# cp /usr/pkg/share/examples/rc.d/nginx /usr/pkg/share/examples/rc.d/pgsql /etc/rc.d
-```
-
-Add nginx and Postgres to `/etc/rc.conf`:
-
-```
-nginx=YES
-pgsql=YES
-```
-
-## Configuring postgres
-
-First, run `# /etc/rc.d/pgsql start`. Then, `$ sudo -Hu pgsql -g pgsql createdb`.
-
-### Install media / graphics packages (optional, see [`docs/installation/optional/media_graphics_packages.md`](../installation/optional/media_graphics_packages.md))
-
-`# pkgin install ImageMagick ffmpeg4 p5-Image-ExifTool`
-
 ## Configuring Pleroma

 Create a user for Pleroma:

@@ -68,41 +72,98 @@ $ cd /home/pleroma
 $ git clone -b stable https://git.pleroma.social/pleroma/pleroma.git
 ```

-Configure Pleroma. Note that you need a domain name at this point:
+Get deps and compile:

 ```
 $ cd /home/pleroma/pleroma
+$ export MIX_ENV=prod
 $ mix deps.get
-$ MIX_ENV=prod mix pleroma.instance gen # You will be asked a few questions here.
+$ mix compile
 ```

-Since Postgres is configured, we can now initialize the database. There should
-now be a file in `config/setup_db.psql` that makes this easier. Edit it, and
-*change the password* to a password of your choice. Make sure it is secure, since
+## Install media / graphics packages (optional, see [`docs/installation/optional/media_graphics_packages.md`](../installation/optional/media_graphics_packages.md))
+
+`# pkgin install ImageMagick ffmpeg4 p5-Image-ExifTool`
+
+or via pkgsrc:
+
+```
+graphics/p5-Image-ExifTool
+graphics/ImageMagick
+multimedia/ffmpeg4
+```
+
+# Configuration
+
+## Understanding $PREFIX
+
+From now on, you may encounter the `$PREFIX` variable in the paths. This variable indicates your current local pkgsrc prefix. Usually it's `/usr/pkg` unless you configured it otherwise. Translated into pkgsrc's lingo, it's called `LOCALBASE`, which essentially means the same thing. You may want to set it up for your local shell session (this uses `mksh`, which should already be installed as one of the required dependencies):
+
+```
+$ export PREFIX=$(pkg_info -Q LOCALBASE mksh)
+$ echo $PREFIX
+/usr/pkg
+```
+
+## Setting up your instance
+
+Now you need to configure your instance. During this initial configuration you will be asked some questions about your server. You will need a domain name at this point; it doesn't have to be deployed, but changing it later will be very cumbersome.
+
+If you've installed via pkgsrc, `pleroma_ctl` should already be in your `PATH`; if you've installed from source, it's located at `/home/pleroma/pleroma/release/bin/pleroma_ctl`.
+
+```
+$ su -l pleroma
+$ pleroma_ctl instance gen --output $PREFIX/etc/pleroma/config.exs --output-psql /tmp/setup_db.psql
+```
+
+During installation you will be asked about static and upload directories. Don't forget to create them and update their permissions:
+
+```
+mkdir -p /var/lib/pleroma/uploads
+chown -R pleroma:pleroma /var/lib/pleroma
+```
+
+## Setting up the database
+
+First, run `# /etc/rc.d/pgsql start`. Then, `$ sudo -Hu pgsql -g pgsql createdb`.
+
+We can now initialize the database. You'll need to edit the generated SQL file from the previous step; it's located at `/tmp/setup_db.psql`.
+
+Edit this file, and *change the password* to a password of your choice. Make sure it is secure, since
 it'll be protecting your database. Now initialize the database:

 ```
-$ sudo -Hu pgsql -g pgsql psql -f config/setup_db.psql
+$ sudo -Hu pgsql -g pgsql psql -f /tmp/setup_db.psql
 ```

 Postgres allows connections from all users without a password by default. To
-fix this, edit `/usr/pkg/pgsql/data/pg_hba.conf`. Change every `trust` to
+fix this, edit `$PREFIX/pgsql/data/pg_hba.conf`. Change every `trust` to
 `password`.

 Once this is done, restart Postgres with `# /etc/rc.d/pgsql restart`.

 Run the database migrations.

+### pkgsrc installation
+
+```
+pleroma_ctl migrate
+```
+
+### Source installation
+
+You will need to do this whenever you update with `git pull`:

 ```
 $ cd /home/pleroma/pleroma
 $ MIX_ENV=prod mix ecto.migrate
 ```

 ## Configuring nginx

 Install the example configuration file
-`/home/pleroma/pleroma/installation/pleroma.nginx` to
-`/usr/pkg/etc/nginx.conf`.
+(`$PREFIX/share/examples/pleroma/pleroma.nginx` or `/home/pleroma/pleroma/installation/pleroma.nginx`) to
+`$PREFIX/etc/nginx.conf`.

 Note that it will need to be wrapped in a `http {}` block. You should add
 settings for the nginx daemon outside of the http block, for example:

@@ -176,27 +237,45 @@ Let's add auto-renewal to `/etc/daily.local`
     --stateless
 ```

-## Creating a startup script for Pleroma
+## Autostart
+
+For a properly functioning instance, you will need the pleroma (backend service), nginx (reverse proxy) and postgresql (database) services running. There's no requirement for them to reside on the same machine, but you have to provide autostart for each of them.
+
+### nginx
+
+```
+# cp $PREFIX/share/examples/rc.d/nginx /etc/rc.d
+# echo "nginx=YES" >> /etc/rc.conf
+```
+
+### postgresql
+
+```
+# cp $PREFIX/share/examples/rc.d/pgsql /etc/rc.d
+# echo "pgsql=YES" >> /etc/rc.conf
+```
+
+### pleroma

-Copy the startup script to the correct location and make sure it's executable:
+First, copy the script (pkgsrc variant):
+
+```
+# cp $PREFIX/share/examples/pleroma/pleroma.rc /etc/rc.d/pleroma
+```
+
+or source variant:

 ```
 # cp /home/pleroma/pleroma/installation/netbsd/rc.d/pleroma /etc/rc.d/pleroma
 # chmod +x /etc/rc.d/pleroma
 ```

-Add the following to `/etc/rc.conf`:
+Then, add the following to `/etc/rc.conf`:

 ```
 pleroma=YES
 pleroma_home="/home/pleroma"
 pleroma_user="pleroma"
 ```

-Run `# /etc/rc.d/pleroma start` to start Pleroma.
-
 ## Conclusion

+Run `# /etc/rc.d/pleroma start` to start Pleroma.
 Restart nginx with `# /etc/rc.d/nginx restart` and you should be up and running.

 Make sure your time is in sync, or other instances will receive your posts with
installation/netbsd/rc.d/pleroma

@@ -1,11 +1,14 @@
 #!/bin/sh
 # PROVIDE: pleroma
-# REQUIRE: DAEMON pgsql
+# REQUIRE: DAEMON pgsql nginx

+if [ -f /etc/rc.subr ]; then
+	. /etc/rc.subr
+fi

 pleroma_home="/home/pleroma"
 pleroma_user="pleroma"

 name="pleroma"
 rcvar=${name}
 command="/usr/pkg/bin/elixir"

@@ -19,10 +22,10 @@ pleroma_env="HOME=${pleroma_home} MIX_ENV=prod"
 check_pidfile()
 {
 	pid=$(pgrep -U "${pleroma_user}" /bin/beam.smp$)
-	echo -n "${pid}"
+	printf '%s' "${pid}"
 }

-if [ -f /etc/rc.subr -a -d /etc/rc.d -a -f /etc/rc.d/DAEMON ]; then
+if [ -f /etc/rc.subr ] && [ -d /etc/rc.d ] && [ -f /etc/rc.d/DAEMON ]; then
 	# newer NetBSD
 	load_rc_config ${name}
 	run_rc_command "$1"

@@ -39,7 +42,7 @@ else
 	stop)
 		echo "Stopping ${name}."
 		check_pidfile
-		! [ -n ${pid} ] && kill ${pid}
+		! [ -n "${pid}" ] && kill "${pid}"
 		;;

 	restart)
lib/mix/pleroma.ex

@@ -14,7 +14,8 @@ defmodule Mix.Pleroma do
     :swoosh,
     :timex,
     :fast_html,
-    :oban
+    :oban,
+    :logger_backends
   ]
   @cachex_children ["object", "user", "scrubber", "web_resp"]
   @doc "Common functions to be reused in mix tasks"
lib/mix/tasks/pleroma/config.ex

@@ -205,6 +205,35 @@ defmodule Mix.Tasks.Pleroma.Config do
     end
   end

+  # Removes any policies that are not a real module
+  # as they will prevent the server from starting
+  def run(["fix_mrf_policies"]) do
+    check_configdb(fn ->
+      start_pleroma()
+
+      group = :pleroma
+      key = :mrf
+
+      %{value: value} =
+        group
+        |> ConfigDB.get_by_group_and_key(key)
+
+      policies =
+        Keyword.get(value, :policies, [])
+        |> Enum.filter(&is_atom(&1))
+        |> Enum.filter(fn mrf ->
+          case Code.ensure_compiled(mrf) do
+            {:module, _} -> true
+            {:error, _} -> false
+          end
+        end)
+
+      value = Keyword.put(value, :policies, policies)
+
+      ConfigDB.update_or_create(%{group: group, key: key, value: value})
+    end)
+  end
+
   @spec migrate_to_db(Path.t() | nil) :: any()
   def migrate_to_db(file_path \\ nil) do
     with :ok <- Pleroma.Config.DeprecationWarnings.warn() do
@ -67,43 +67,168 @@ defmodule Mix.Tasks.Pleroma.Database do
|
|||
OptionParser.parse(
|
||||
args,
|
||||
strict: [
|
||||
vacuum: :boolean
|
||||
vacuum: :boolean,
|
||||
keep_threads: :boolean,
|
||||
keep_non_public: :boolean,
|
||||
prune_orphaned_activities: :boolean
|
||||
]
|
||||
)
|
||||
|
||||
start_pleroma()
|
    deadline = Pleroma.Config.get([:instance, :remote_post_retention_days])

    time_deadline = NaiveDateTime.utc_now() |> NaiveDateTime.add(-(deadline * 86_400))

    Logger.info("Pruning objects older than #{deadline} days")
    log_message = "Pruning objects older than #{deadline} days"

    time_deadline =
      NaiveDateTime.utc_now()
      |> NaiveDateTime.add(-(deadline * 86_400))

    log_message =
      if Keyword.get(options, :keep_non_public) do
        log_message <> ", keeping non public posts"
      else
        log_message
      end

    from(o in Object,
      where:
        fragment(
          "?->'to' \\? ? OR ?->'cc' \\? ?",
          o.data,
          ^Pleroma.Constants.as_public(),
          o.data,
          ^Pleroma.Constants.as_public()
        ),
      where: o.inserted_at < ^time_deadline,
      where:

    log_message =
      if Keyword.get(options, :keep_threads) do
        log_message <> ", keeping threads intact"
      else
        log_message
      end

    log_message =
      if Keyword.get(options, :prune_orphaned_activities) do
        log_message <> ", pruning orphaned activities"
      else
        log_message
      end

    log_message =
      if Keyword.get(options, :vacuum) do
        log_message <>
          ", doing a full vacuum (you shouldn't do this as a recurring maintenance task)"
      else
        log_message
      end

    Logger.info(log_message)

    if Keyword.get(options, :keep_threads) do
      # We want to delete objects from threads where
      # 1. the newest post is still old
      # 2. none of the activities is local
      # 3. none of the activities is bookmarked
      # 4. optionally none of the posts is non-public
      deletable_context =
        if Keyword.get(options, :keep_non_public) do
          Pleroma.Activity
          |> join(:left, [a], b in Pleroma.Bookmark, on: a.id == b.activity_id)
          |> group_by([a], fragment("? ->> 'context'::text", a.data))
          |> having(
            [a],
            not fragment(
              # Posts (checked on the Create activity) are non-public
              "bool_or((not(?->'to' \\? ? OR ?->'cc' \\? ?)) and ? ->> 'type' = 'Create')",
              a.data,
              ^Pleroma.Constants.as_public(),
              a.data,
              ^Pleroma.Constants.as_public(),
              a.data
            )
          )
        else
          Pleroma.Activity
          |> join(:left, [a], b in Pleroma.Bookmark, on: a.id == b.activity_id)
          |> group_by([a], fragment("? ->> 'context'::text", a.data))
        end
        |> having([a], max(a.updated_at) < ^time_deadline)
        |> having([a], not fragment("bool_or(?)", a.local))
        |> having([_, b], fragment("max(?::text) is null", b.id))
        |> select([a], fragment("? ->> 'context'::text", a.data))

      Pleroma.Object
      |> where([o], fragment("? ->> 'context'::text", o.data) in subquery(deletable_context))
    else
      if Keyword.get(options, :keep_non_public) do
        Pleroma.Object
        |> where(
          [o],
          fragment(
            "?->'to' \\? ? OR ?->'cc' \\? ?",
            o.data,
            ^Pleroma.Constants.as_public(),
            o.data,
            ^Pleroma.Constants.as_public()
          )
        )
      else
        Pleroma.Object
      end
      |> where([o], o.updated_at < ^time_deadline)
      |> where(
        [o],
        fragment("split_part(?->>'actor', '/', 3) != ?", o.data, ^Pleroma.Web.Endpoint.host())
      )
    )
    end
    |> Repo.delete_all(timeout: :infinity)

    if !Keyword.get(options, :keep_threads) do
      # Without the --keep-threads option, it's possible that bookmarked
      # objects have been deleted. We remove the corresponding bookmarks.
      """
      delete from public.bookmarks
      where id in (
        select b.id from public.bookmarks b
        left join public.activities a on b.activity_id = a.id
        left join public.objects o on a."data" ->> 'object' = o.data ->> 'id'
        where o.id is null
      )
      """
      |> Repo.query([], timeout: :infinity)
    end

    if Keyword.get(options, :prune_orphaned_activities) do
      # Prune activities that link to a single object
      """
      delete from public.activities
      where id in (
        select a.id from public.activities a
        left join public.objects o on a.data ->> 'object' = o.data ->> 'id'
        left join public.activities a2 on a.data ->> 'object' = a2.data ->> 'id'
        left join public.users u on a.data ->> 'object' = u.ap_id
        where not a.local
        and jsonb_typeof(a."data" -> 'object') = 'string'
        and o.id is null
        and a2.id is null
        and u.id is null
      )
      """
      |> Repo.query([], timeout: :infinity)

      # Prune activities that link to an array of objects
      """
      delete from public.activities
      where id in (
        select a.id from public.activities a
        join json_array_elements_text((a."data" -> 'object')::json) as j on jsonb_typeof(a."data" -> 'object') = 'array'
        left join public.objects o on j.value = o.data ->> 'id'
        left join public.activities a2 on j.value = a2.data ->> 'id'
        left join public.users u on j.value = u.ap_id
        group by a.id
        having max(o.data ->> 'id') is null
        and max(a2.data ->> 'id') is null
        and max(u.ap_id) is null
      )
      """
      |> Repo.query([], timeout: :infinity)
    end

    prune_hashtags_query = """
    DELETE FROM hashtags AS ht
    WHERE NOT EXISTS (
      SELECT 1 FROM hashtags_objects hto
      WHERE ht.id = hto.hashtag_id)
    """

    Repo.query(prune_hashtags_query)
    |> Repo.query()

    if Keyword.get(options, :vacuum) do
      Maintenance.vacuum("full")

@@ -226,7 +351,7 @@ defmodule Mix.Tasks.Pleroma.Database do
      )
    end

    shell_info('Done.')
    shell_info(~c"Done.")
  end
end
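The option handling above implies a command-line invocation along these lines — a sketch, assuming OptionParser's usual dash-to-underscore switch mapping (--keep-non-public becoming :keep_non_public, and so on):

    mix pleroma.database prune_objects --keep-threads --keep-non-public --prune-orphaned-activities

A --vacuum switch would additionally trigger the full VACUUM handled at the end of the task; as the log message itself warns, that is not something to schedule as a recurring maintenance job.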
83  lib/mix/tasks/pleroma/search/indexer.ex  Normal file
@@ -0,0 +1,83 @@
# Pleroma: A lightweight social networking server
# Copyright © 2017-2021 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only

defmodule Mix.Tasks.Pleroma.Search.Indexer do
  import Mix.Pleroma
  import Ecto.Query

  alias Pleroma.Workers.SearchIndexingWorker

  def run(["create_index"]) do
    start_pleroma()

    with :ok <- Pleroma.Config.get([Pleroma.Search, :module]).create_index() do
      IO.puts("Index created")
    else
      e -> IO.puts("Could not create index: #{inspect(e)}")
    end
  end

  def run(["drop_index"]) do
    start_pleroma()

    with :ok <- Pleroma.Config.get([Pleroma.Search, :module]).drop_index() do
      IO.puts("Index dropped")
    else
      e -> IO.puts("Could not drop index: #{inspect(e)}")
    end
  end

  def run(["index" | options]) do
    {options, [], []} =
      OptionParser.parse(
        options,
        strict: [
          chunk: :integer,
          limit: :integer,
          step: :integer
        ]
      )

    start_pleroma()

    chunk_size = Keyword.get(options, :chunk, 100)
    limit = Keyword.get(options, :limit, 100_000)
    per_step = Keyword.get(options, :step, 1000)

    chunks = max(div(limit, per_step), 1)

    1..chunks
    |> Enum.each(fn step ->
      q =
        from(a in Pleroma.Activity,
          limit: ^per_step,
          offset: ^per_step * (^step - 1),
          select: [:id],
          order_by: [desc: :id]
        )

      {:ok, ids} =
        Pleroma.Repo.transaction(fn ->
          Pleroma.Repo.stream(q, timeout: :infinity)
          |> Enum.map(fn a ->
            a.id
          end)
        end)

      IO.puts("Got #{length(ids)} activities, adding to indexer")

      ids
      |> Enum.chunk_every(chunk_size)
      |> Enum.each(fn chunk ->
        IO.puts("Adding #{length(chunk)} activities to indexing queue")

        chunk
        |> Enum.map(fn id ->
          SearchIndexingWorker.new(%{"op" => "add_to_index", "activity" => id})
        end)
        |> Oban.insert_all()
      end)
    end)
  end
end
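Given the run/1 clauses above, the task would presumably be driven like this (switch names follow from the OptionParser strict list; the defaults are chunk: 100, limit: 100_000, step: 1000):

    mix pleroma.search.indexer create_index
    mix pleroma.search.indexer index --limit 10000 --step 1000 --chunk 100

Each step selects one page of activity IDs inside a Repo transaction, then enqueues them in chunks as SearchIndexingWorker jobs for the configured search backend.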
25  lib/mix/tasks/pleroma/test_runner.ex  Normal file
@@ -0,0 +1,25 @@
defmodule Mix.Tasks.Pleroma.TestRunner do
  @shortdoc "Retries tests once if they fail"

  use Mix.Task

  def run(args \\ []) do
    case System.cmd("mix", ["test"] ++ args, into: IO.stream(:stdio, :line)) do
      {_, 0} ->
        :ok

      _ ->
        retry(args)
    end
  end

  def retry(args) do
    case System.cmd("mix", ["test", "--failed"] ++ args, into: IO.stream(:stdio, :line)) do
      {_, 0} ->
        :ok

      _ ->
        exit(1)
    end
  end
end
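Since the task simply forwards its arguments to mix test, then falls back to mix test --failed on a non-zero exit, it works as a drop-in wrapper, for example:

    mix pleroma.test_runner --cover --preload-modules

If the retry also fails, exit(1) aborts the task with a non-zero exit status, so CI still sees the failure.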
@@ -14,6 +14,7 @@ defmodule Pleroma.Application do
  @name Mix.Project.config()[:name]
  @version Mix.Project.config()[:version]
  @repository Mix.Project.config()[:source_url]
  @compile_env Mix.env()

  def name, do: @name
  def version, do: @version

@@ -51,7 +52,11 @@ defmodule Pleroma.Application do
    Pleroma.HTML.compile_scrubbers()
    Pleroma.Config.Oban.warn()
    Config.DeprecationWarnings.warn()
    Pleroma.Web.Plugs.HTTPSecurityPlug.warn_if_disabled()

    if @compile_env != :test do
      Pleroma.Web.Plugs.HTTPSecurityPlug.warn_if_disabled()
    end

    Pleroma.ApplicationRequirements.verify!()
    load_custom_modules()
    Pleroma.Docs.JSON.compile()

@@ -287,8 +292,6 @@ defmodule Pleroma.Application do
    config = Config.get(ConcurrentLimiter, [])

    [
      Pleroma.Web.RichMedia.Helpers,
      Pleroma.Web.ActivityPub.MRF.MediaProxyWarmingPolicy,
      Pleroma.Search,
      Pleroma.Webhook.Notify
    ]
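Reading Mix.env() into the @compile_env attribute freezes the value at compile time, which matters because Mix is not available at runtime in OTP releases. A minimal sketch of the pattern (module name is illustrative):

    defmodule MyApp.BuildInfo do
      # Evaluated once during compilation; safe to call in a release,
      # where Mix.env/0 would raise.
      @compile_env Mix.env()

      def compile_env, do: @compile_env
    end

Guarding warn_if_disabled/0 behind @compile_env != :test then silences the HTTPSecurityPlug warning in the test environment without introducing a runtime Mix dependency.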
@@ -241,10 +241,9 @@ defmodule Pleroma.ApplicationRequirements do

    missing_mrfs =
      Enum.reduce(mrfs, [], fn x, acc ->
        if Code.ensure_compiled(x) do
          acc
        else
          acc ++ [x]
        case Code.ensure_compiled(x) do
          {:module, _} -> acc
          {:error, _} -> acc ++ [x]
        end
      end)
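The old branch was a genuine bug: Code.ensure_compiled/1 never returns a boolean, so the if always took the truthy branch and missing MRF modules were never reported. Its actual return values are exactly what the new case clauses match on:

    iex> Code.ensure_compiled(Enum)
    {:module, Enum}
    iex> Code.ensure_compiled(NoSuchModule)
    {:error, :nofile}

Both tuples are truthy in Elixir, which is why the defect was silent.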
@@ -1,5 +1,5 @@
# Pleroma: A lightweight social networking server
# Copyright © 2017-2022 Pleroma Authors <https://pleroma.social/>
# Copyright © 2017-2023 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only

defmodule Pleroma.Config.TransferTask do

@@ -44,14 +44,9 @@ defmodule Pleroma.Config.TransferTask do
    with {_, true} <- {:configurable, Config.get(:configurable_from_database)} do
      # We need to restart applications for loaded settings to take effect

      {logger, other} =
      settings =
        (Repo.all(ConfigDB) ++ deleted_settings)
        |> Enum.map(&merge_with_default/1)
        |> Enum.split_with(fn {group, _, _, _} -> group in [:logger] end)

      logger
      |> Enum.sort()
      |> Enum.each(&configure/1)

      started_applications = Application.started_applications()

@@ -64,7 +59,7 @@ defmodule Pleroma.Config.TransferTask do
      [:pleroma | reject]
    end

    other
    settings
    |> Enum.map(&update/1)
    |> Enum.uniq()
    |> Enum.reject(&(&1 in reject))

@@ -102,38 +97,6 @@ defmodule Pleroma.Config.TransferTask do
      {group, key, value, merged}
    end

  # change logger configuration at runtime, without restart
  defp configure({_, :backends, _, merged}) do
    # removing current backends
    Enum.each(Application.get_env(:logger, :backends), &Logger.remove_backend/1)

    Enum.each(merged, &Logger.add_backend/1)

    :ok = update_env(:logger, :backends, merged)
  end

  defp configure({_, key, _, merged}) when key in [:console, :ex_syslogger] do
    merged =
      if key == :console do
        put_in(merged[:format], merged[:format] <> "\n")
      else
        merged
      end

    backend =
      if key == :ex_syslogger,
        do: {ExSyslogger, :ex_syslogger},
        else: key

    Logger.configure_backend(backend, merged)
    :ok = update_env(:logger, key, merged)
  end

  defp configure({_, key, _, merged}) do
    Logger.configure([{key, merged}])
    :ok = update_env(:logger, key, merged)
  end

  defp update({group, key, value, merged}) do
    try do
      :ok = update_env(group, key, merged)
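With the configure/1 clauses removed above, :logger settings are no longer split out and applied specially; every setting now flows through update/1. For reference, the removed Enum.split_with/2 call partitioned the settings list like this (a simplified illustration with two-element tuples; TransferTask uses {group, key, value, merged} four-tuples):

    iex> Enum.split_with([logger: 1, pleroma: 2, logger: 3], fn {group, _} -> group == :logger end)
    {[logger: 1, logger: 3], [pleroma: 2]}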
@@ -165,8 +165,7 @@ defmodule Pleroma.ConfigDB do
      {:pleroma, :ecto_repos},
      {:mime, :types},
      {:cors_plug, [:max_age, :methods, :expose, :headers]},
      {:swarm, :node_blacklist},
      {:logger, :backends}
      {:swarm, :node_blacklist}
    ]

    Enum.any?(full_key_update, fn

@@ -385,7 +384,12 @@ defmodule Pleroma.ConfigDB do

  @spec module_name?(String.t()) :: boolean()
  def module_name?(string) do
    Regex.match?(~r/^(Pleroma|Phoenix|Tesla|Ueberauth|Swoosh)\./, string) or
      string in ["Oban", "Ueberauth", "ExSyslogger", "ConcurrentLimiter"]
    if String.contains?(string, ".") do
      [name | _] = String.split(string, ".", parts: 2)

      name in ~w[Pleroma Phoenix Tesla Ueberauth Swoosh Logger LoggerBackends]
    else
      string in ~w[Oban Ueberauth ExSyslogger ConcurrentLimiter]
    end
  end
end
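The rewritten module_name?/1 keys only on the first dotted segment of the name (or, for bare names, on a small allowlist), which also admits the new Logger and LoggerBackends namespaces. Expected behaviour, traced through the clauses above as a sketch:

    iex> Pleroma.ConfigDB.module_name?("Pleroma.Upload")
    true
    iex> Pleroma.ConfigDB.module_name?("LoggerBackends.Console")
    true
    iex> Pleroma.ConfigDB.module_name?("Oban")
    true
    iex> Pleroma.ConfigDB.module_name?("String")
    false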
@@ -416,10 +416,10 @@ defmodule Pleroma.Emoji.Pack do
  end

  defp create_archive_and_cache(pack, hash) do
    files = ['pack.json' | Enum.map(pack.files, fn {_, file} -> to_charlist(file) end)]
    files = [~c"pack.json" | Enum.map(pack.files, fn {_, file} -> to_charlist(file) end)]

    {:ok, {_, result}} =
      :zip.zip('#{pack.name}.zip', files, [:memory, cwd: to_charlist(pack.path)])
      :zip.zip(~c"#{pack.name}.zip", files, [:memory, cwd: to_charlist(pack.path)])

    ttl_per_file = Pleroma.Config.get!([:emoji, :shared_pack_cache_seconds_per_file])
    overall_ttl = :timer.seconds(ttl_per_file * Enum.count(files))

@@ -586,7 +586,7 @@ defmodule Pleroma.Emoji.Pack do
    with :ok <- File.mkdir_p!(local_pack.path) do
      files = Enum.map(remote_pack["files"], fn {_, path} -> to_charlist(path) end)
      # Fallback cannot contain a pack.json file
      files = if pack_info[:fallback], do: files, else: ['pack.json' | files]
      files = if pack_info[:fallback], do: files, else: [~c"pack.json" | files]

      :zip.unzip(archive, cwd: to_charlist(local_pack.path), file_list: files)
    end
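The 'pack.json' to ~c"pack.json" changes are mechanical: single-quoted charlist literals are deprecated in favour of the ~c sigil as of Elixir 1.15, and both spellings build the same Erlang charlist that :zip expects:

    iex> ~c"pack.json"
    ~c"pack.json"
    iex> is_list(~c"pack.json") and List.first(~c"pack.json") == ?p
    true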
@@ -43,10 +43,6 @@ defmodule Pleroma.Frontend do
      {:download_or_unzip, _} ->
        Logger.info("Could not download or unzip the frontend")
        {:error, "Could not download or unzip the frontend"}

      _e ->
        Logger.info("Could not install the frontend")
        {:error, "Could not install the frontend"}
    end
  end
@@ -9,7 +9,7 @@ defmodule Pleroma.Gun.ConnectionPool.Reclaimer do

  def start_monitor do
    pid =
      case GenServer.start_link(__MODULE__, [], name: {:via, Registry, {registry(), "reclaimer"}}) do
      case GenServer.start(__MODULE__, [], name: {:via, Registry, {registry(), "reclaimer"}}) do
        {:ok, pid} ->
          pid
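Switching from GenServer.start_link/3 to GenServer.start/3 starts the reclaimer without linking it to the caller, so neither side is taken down by the other's crash; as the name start_monitor suggests, the caller observes the process explicitly instead. The general shape of that pattern, with an illustrative MyServer module:

    # Linked: a crash propagates between caller and server.
    {:ok, pid} = GenServer.start_link(MyServer, [])

    # Unlinked: pair with a monitor to be notified of termination
    # via a :DOWN message instead of crashing together.
    {:ok, pid} = GenServer.start(MyServer, [])
    ref = Process.monitor(pid)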
@@ -5,6 +5,9 @@
defmodule Pleroma.Gun.ConnectionPool.WorkerSupervisor do
  @moduledoc "Supervisor for pool workers. Does not do anything except enforce max connection limit"

  alias Pleroma.Config
  alias Pleroma.Gun.ConnectionPool.Worker

  use DynamicSupervisor

  def start_link(opts) do

@@ -14,21 +17,28 @@ defmodule Pleroma.Gun.ConnectionPool.WorkerSupervisor do
  def init(_opts) do
    DynamicSupervisor.init(
      strategy: :one_for_one,
      max_children: Pleroma.Config.get([:connections_pool, :max_connections])
      max_children: Config.get([:connections_pool, :max_connections])
    )
  end

  def start_worker(opts, last_attempt \\ false) do
    case DynamicSupervisor.start_child(__MODULE__, {Pleroma.Gun.ConnectionPool.Worker, opts}) do
      {:error, :max_children} ->
        funs = [fn -> last_attempt end, fn -> match?(:error, free_pool()) end]
  def start_worker(opts, last_attempt \\ false)

        if Enum.any?(funs, fn fun -> fun.() end) do
          :telemetry.execute([:pleroma, :connection_pool, :provision_failure], %{opts: opts})
          {:error, :pool_full}
        else
          start_worker(opts, true)
        end
  def start_worker(opts, true) do
    case DynamicSupervisor.start_child(__MODULE__, {Worker, opts}) do
      {:error, :max_children} ->
        :telemetry.execute([:pleroma, :connection_pool, :provision_failure], %{opts: opts})
        {:error, :pool_full}

      res ->
        res
    end
  end

  def start_worker(opts, false) do
    case DynamicSupervisor.start_child(__MODULE__, {Worker, opts}) do
      {:error, :max_children} ->
        free_pool()
        start_worker(opts, true)

      res ->
        res
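The refactor above replaces the runtime last_attempt checks with separate clauses for true and false; because a multi-clause function cannot repeat its defaults, the default moves into the bodiless head def start_worker(opts, last_attempt \\ false). The same idiom in miniature (Example is illustrative):

    defmodule Example do
      # Defaults are declared once, in a head without a body...
      def greet(name, greeting \\ "hello")

      # ...and the clauses pattern-match on the second argument.
      def greet(name, "hello"), do: "hello, " <> name
      def greet(name, greeting), do: greeting <> ", " <> name
    end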
@@ -16,4 +16,15 @@ defmodule Pleroma.Helpers.InetHelper do
  def parse_address(ip) do
    :inet.parse_address(ip)
  end

  def parse_cidr(proxy) when is_binary(proxy) do
    proxy =
      cond do
        "/" in String.codepoints(proxy) -> proxy
        InetCidr.v4?(InetCidr.parse_address!(proxy)) -> proxy <> "/32"
        InetCidr.v6?(InetCidr.parse_address!(proxy)) -> proxy <> "/128"
      end

    InetCidr.parse_cidr!(proxy, true)
  end
end
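parse_cidr/1 normalizes bare addresses to host-sized networks (/32 for IPv4, /128 for IPv6) before delegating to InetCidr. Assuming InetCidr.parse_cidr!/2 returns {first_address, last_address, prefix_length} tuples, the results would look roughly like this (hypothetical values):

    Pleroma.Helpers.InetHelper.parse_cidr("10.0.0.1")
    # => {{10, 0, 0, 1}, {10, 0, 0, 1}, 32}

    Pleroma.Helpers.InetHelper.parse_cidr("10.0.0.0/8")
    # => {{10, 0, 0, 0}, {10, 255, 255, 255}, 8}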
Some files were not shown because too many files have changed in this diff.