• 1 Post
  • 9 Comments
Joined 1Y ago
Cake day: Jun 26, 2023


Added internal nginx and external proxy configs to a reply. I didn’t make any changes to the postgres config.

Hope it helps


This is the nginx.conf file for my external proxy:

server {
    listen 443 ssl http2;
    listen [::]:443 ssl http2;

    server_name ;

    include /config/nginx/ssl.conf;

    location / {
        include /config/nginx/proxy.conf;
        include /config/nginx/resolver.conf;
#        set $upstream_app lemmy;
        set $upstream_app proxy;
        set $upstream_port 8536;
        set $upstream_proto http;
        proxy_pass $upstream_proto://$upstream_app:$upstream_port;
#        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_set_header X-Real-IP $remote_addr;
#        proxy_set_header Host $host;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        client_max_body_size 50M;            
    }
}

access_log /var/log/nginx/access.log combined;

You’ll need to change server_name to the appropriate value. I’m forwarding requests to the proxy container referenced by the compose file.
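For example, with a placeholder domain the blank directive would read:

    server_name lemmy.example.com;  # lemmy.example.com is a placeholder; use your own domain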

My nginx.conf for lemmy-nginx is below; sorry if it’s a bit messy. I prefer to comment out rather than remove working config. You’ll have to change server_name here as well.

worker_processes 1;
events {
    worker_connections 1024;
}
http {
# Beginning of kbin fix
    # We construct a string consisting of the "request method" and "http accept header"
    # and then apply some ~simple regexp matches to that combination to decide on the
    # HTTP upstream we should proxy the request to.
    #
    # Example strings:
    #
    #   "GET:application/activity+json"
    #   "GET:text/html"
    #   "POST:application/activity+json"
    #
    # You can see some basic match tests in this regex101 matching this configuration
    # https://regex101.com/r/vwMJNc/1
    #
    # Learn more about nginx maps here http://nginx.org/en/docs/http/ngx_http_map_module.html
    map "$request_method:$http_accept" $proxpass {
        # If no explicit matches exist below, send traffic to lemmy-ui
        default "http://lemmy-ui";

        # GET/HEAD requests that accept ActivityPub or Linked Data JSON should go to lemmy.
        #
        # These requests are used by Mastodon and other fediverse instances to look up profile information,
        # discover site information and so on.
        "~^(?:GET|HEAD):.*?application\/(?:activity|ld)\+json" "http://lemmy";

        # All non-GET/HEAD requests should go to lemmy
        #
        # Rather than calling out POST, PUT, DELETE, PATCH, CONNECT and all the verbs manually
        # we simply negate the GET|HEAD pattern from above and accept all possible $http_accept values
        "~^(?!(GET|HEAD)).*:" "http://lemmy";
    }
### end of kbin fix
    upstream lemmy {
        # this needs to map to the lemmy (server) docker service hostname
        server "lemmy:8536";
    }
    upstream lemmy-ui {
        # this needs to map to the lemmy-ui docker service hostname
        server "lemmy-ui:1234";
    }

    server {
        # this is the port inside docker, not the public one yet
        listen 1236;
        listen 8536;
        # change if needed, this is facing the public web
        #server_name localhost;
        server_name ;
        server_tokens off;

        gzip on;
        gzip_types text/css application/javascript image/svg+xml;
        gzip_vary on;

        # Upload limit, relevant for pictrs
        client_max_body_size 100M;

        add_header X-Frame-Options SAMEORIGIN;
        add_header X-Content-Type-Options nosniff;
        add_header X-XSS-Protection "1; mode=block";

        # frontend general requests
        location / {
            # distinguish between ui requests and backend
            # don't change lemmy-ui or lemmy here, they refer to the upstream definitions on top
#            set $proxpass "http://lemmy-ui";

#            if ($http_accept = "application/activity+json") {
#              set $proxpass "http://lemmy";
#            }
#            if ($http_accept = "application/ld+json; profile=\"https://www.w3.org/ns/activitystreams\"") {
#              set $proxpass "http://lemmy";
#            }
#            if ($request_method = POST) {
#              set $proxpass "http://lemmy";
#            }
            proxy_pass $proxpass;

            rewrite ^(.+)/+$ $1 permanent;
            # Send actual client IP upstream
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header Host $host;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        }

        # backend
        location ~ ^/(api|pictrs|feeds|nodeinfo|.well-known) {
            proxy_pass "http://lemmy";
            # proxy common stuff
            proxy_http_version 1.1;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection "upgrade";

            # Send actual client IP upstream
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header Host $host;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        }
    }
}
#error_log /var/log/nginx/error.log debug;
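If you edit this file in place, a quick sanity check (assuming the compose stack further down, where this nginx.conf is mounted into the proxy service) is:

docker compose exec proxy nginx -t         # validate the mounted config
docker compose exec proxy nginx -s reload  # reload nginx without restarting the container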

Yes, sorry. I have

COMMUNITY_SORT_METHODS: '[
        "TopAll",
        "TopDay",
        "TopHour" ]'

I added TopHour. I left the run schedule at 240 minutes, but it seems to keep up pretty well. I’m regularly subscribing to new communities from the All tab.
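In a compose setup this would sit in the LCS service’s environment block; a rough sketch (the image name is a placeholder and the other required variables are omitted, so check the LCS repo’s README for the full list):

services:
  lcs:
    image: <lcs-image>   # placeholder; use the image documented in the LCS repo
    environment:
      COMMUNITY_SORT_METHODS: '[
        "TopAll",
        "TopDay",
        "TopHour" ]'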


I can vouch for Community Seeder; my personal instance’s All page looks as populated as my kbin.social account.


I use a reverse proxy, so I still use the DNS name to access it internally.
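(A local DNS override or hosts entry pointing the public domain at the reverse proxy’s LAN IP is one common way to do that; the values below are placeholders.)

# example hosts entry / local DNS override; IP and domain are placeholders
192.168.1.50   lemmy.example.com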


Hasn’t Kubernetes already replaced master-slave with master/manager-worker? Seems like there are plenty of alternatives.


Lemmy Community Seeder (LCS) - For pre-populating the “All” feed - nowsci
cross-posted from: https://mimiclem.me/post/7601

> Crossposting [this](https://lemmy.nowsci.com/post/2759) from [@fmstrat@lemmy.nowsci.com](https://lemmy.nowsci.com/u/fmstrat), seems almost essential for small instances: When launching a new Lemmy instance, your All feed will have very little populated. Also as a small instance, new communities that crop up may never make their way to you. LCS is a tool to seed communities, so your users have something in their All feed, right from the start. It tells your instance to pull the top communities and the communities with the top posts from your favorite instances.
>
> How to run manually and in docker is included in the repo.
>
> Let me know if there’s anything anyone needs it to do and I’ll see if I can fit it in. I’m going to work on a “purge old posts that are unsaved and not commented on by local users” first, since small instances are sure to run out of disk space.

I was able to get it running with the Docker Compose CA app and minimal changes to the official docker-compose file (docs). I’m running SWAG in front of the Lemmy proxy with no federation issues. Ibracorp’s tutorial can help with the Compose plugin.

You’ll need to make sure the volume locations and ports are appropriate for your Unraid install. You’ll also have to update your domain in the compose file. I’m running SWAG, so I needed to make a new reverse proxy configuration per the official docs.
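Once the paths and domain in the compose file below are updated, bringing the stack up and watching the backend logs is the standard Compose workflow:

docker compose up -d          # create/start the stack in the background
docker compose logs -f lemmy  # follow the lemmy backend logs for errors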

My compose file is below:

version: "3.7"

x-logging: &default-logging
  driver: "json-file"
  options:
    max-size: "50m"
    max-file: 4

networks:
  # communication to web and clients
  lemmyexternalproxy:
    name: proxynet
    external: true
  # communication between lemmy services
  lemmyinternal:
    driver: bridge
    internal: true

services:
  proxy:
    image: nginx:1-alpine
    networks:
      - lemmyinternal
      - lemmyexternalproxy
    ports:
      # actual and only port facing any connection from outside
      # Note, change the left number if port 8536 is already in use on your system
      # You could use port 80 if you won't use a reverse proxy
      - "8536:8536"
    volumes:
      #- nginx.conf:/etc/nginx/nginx.conf:ro,Z
      - /mnt/user/appdata/lemmy-nginx/nginx.conf:/etc/nginx/nginx.conf:ro
    restart: always
    depends_on:
      - pictrs
      - lemmy-ui
    logging: *default-logging

  lemmy:
    image: dessalines/lemmy:0.18.0
    #image: dessalines/lemmy:dev
    # use this to build your local lemmy server image for development
    # run docker compose up --build
    # build:
    #  context: ../
    #  dockerfile: docker/Dockerfile
      # args:
      #   RUST_RELEASE_MODE: release
    # this hostname is used in nginx reverse proxy and also for lemmy ui to connect to the backend, do not change
    hostname: lemmy
    networks:
      - lemmyinternal
      - lemmyexternalproxy
    restart: always
    environment:
      - RUST_LOG="warn,lemmy_server=debug,lemmy_api=debug,lemmy_api_common=debug,lemmy_api_crud=debug,lemmy_apub=debug,lemmy_db_schema=debug,lemmy_db_views=debug,lemmy_db_views_actor=debug,lemmy_db_views_moderator=debug,lemmy_routes=debug,lemmy_utils=debug,lemmy_websocket=debug"
      - RUST_BACKTRACE=full
      - LEMMY_CORS_ORIGIN=<domain>
    volumes:
      - /mnt/user/appdata/lemmy/lemmy.hjson:/config/config.hjson
    depends_on:
      - postgres
      - pictrs
    logging: *default-logging

  lemmy-ui:
    #image: dessalines/lemmy-ui:latest
    image: dessalines/lemmy-ui:0.18.0
    # use this to build your local lemmy ui image for development
    # run docker compose up --build
    # assuming lemmy-ui is cloned beside the lemmy directory
    # build:
    #   context: ../../lemmy-ui
    #   dockerfile: dev.dockerfile
    networks:
      - lemmyinternal
    environment:
      # this needs to match the hostname defined in the lemmy service
      - LEMMY_UI_LEMMY_INTERNAL_HOST=lemmy:8536
      # set the outside hostname here
      #- LEMMY_UI_LEMMY_EXTERNAL_HOST=localhost:1236
      - LEMMY_UI_LEMMY_EXTERNAL_HOST=<domain>
      - LEMMY_HTTPS=false
      - LEMMY_UI_DEBUG=true
    depends_on:
      - lemmy
    restart: always
    logging: *default-logging
    init: true

  pictrs:
    image: asonix/pictrs:0.4.0-rc.7
    # this needs to match the pictrs url in lemmy.hjson
    hostname: pictrs
    # we can set options to pictrs like this, here we set max. image size and forced format for conversion
    # entrypoint: /sbin/tini -- /usr/local/bin/pict-rs -p /mnt -m 4 --image-format webp
    networks:
      - lemmyinternal
    environment:
      - PICTRS_OPENTELEMETRY_URL=http://otel:4137
      - PICTRS__API_KEY=API_KEY
      - RUST_LOG=debug
      - RUST_BACKTRACE=full
      - PICTRS__MEDIA__VIDEO_CODEC=vp9
      - PICTRS__MEDIA__GIF__MAX_WIDTH=256
      - PICTRS__MEDIA__GIF__MAX_HEIGHT=256
      - PICTRS__MEDIA__GIF__MAX_AREA=65536
      - PICTRS__MEDIA__GIF__MAX_FRAME_COUNT=400
    user: 991:991
    volumes:
      - /mnt/user/appdata/lemmy-pictrs:/mnt
    restart: always
    logging: *default-logging

  postgres:
    image: postgres:15-alpine
    # this needs to match the database host in lemmy.hjson
    # Tune your settings via
    # https://pgtune.leopard.in.ua/#/
    # You can use this technique to add them here
    # https://stackoverflow.com/a/30850095/1655478
    hostname: postgres
    command:
      [
        "postgres",
        "-c",
        "session_preload_libraries=auto_explain",
        "-c",
        "auto_explain.log_min_duration=5ms",
        "-c",
        "auto_explain.log_analyze=true",
        "-c",
        "track_activity_query_size=1048576",
      ]
    networks:
      - lemmyinternal
      # adding the external facing network to allow direct db access for devs
      - lemmyexternalproxy
    ports:
      # use a different port so it doesn't conflict with a potential postgres db running on the host
      - "5433:5432"
    environment:
      - POSTGRES_USER=<strong-user>
      - POSTGRES_PASSWORD=<strong-password>
      - POSTGRES_DB=lemmy
    volumes:
      - /mnt/user/appdata/lemmy-postgres:/var/lib/postgresql/data
    restart: always
    logging: *default-logging
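For completeness, the lemmy.hjson mounted into the lemmy service only needs a few values to line up with this compose file; a minimal sketch using the same placeholders (see the official Lemmy config documentation for all other options):

{
  # must match the public domain served by the reverse proxy
  hostname: "<domain>"
  database: {
    # these match the postgres service above
    host: postgres
    user: "<strong-user>"
    password: "<strong-password>"
    database: "lemmy"
  }
  pictrs: {
    # pictrs listens on 8080 inside its container
    url: "http://pictrs:8080/"
    # matches PICTRS__API_KEY above
    api_key: "API_KEY"
  }
}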