Nowadays people's interest in Mastodon is increasing. That's why I decided to join the fediverse with my own server, deployed on an RPi 5/8GB with Docker. Requirements:
First of all, we have to create the directory we are going to use to store data:
$ cd /opt && sudo git clone https://github.com/mastodon/mastodon live
$ cd live && sudo git checkout $(git tag -l | grep -v 'rc[0-9]*$' | sort -V | tail -n 1)
The last line checks out the latest stable tag; since we don't want to build the image ourselves, we have to comment out the build lines in docker-compose.yml
$ sudo sed -i 's/^\( *\)build:/\1#build:/' docker-compose.yml
At the time of writing, the latest stable release is 4.2.8, while docker-compose.yml provides instructions to pull 4.2.7. We have to change it:
$ sudo sed -i 's/v4.2.7/v4.2.8/g' docker-compose.yml
That said, we have to copy the example .env.production.sample into .env.production and edit it to suit our needs:
$ sudo cp .env.production.sample .env.production
$ sudo vim .env.production
Its content, without comments, is:
LOCAL_DOMAIN=example.com
REDIS_HOST=localhost
REDIS_PORT=6379
DB_HOST=/var/run/postgresql
DB_USER=mastodon
DB_NAME=mastodon_production
DB_PASS=
DB_PORT=5432
ES_ENABLED=true
ES_HOST=localhost
ES_PORT=9200
ES_USER=elastic
ES_PASS=password
SECRET_KEY_BASE=
OTP_SECRET=
VAPID_PRIVATE_KEY=
VAPID_PUBLIC_KEY=
SMTP_SERVER=
SMTP_PORT=587
SMTP_LOGIN=
SMTP_PASSWORD=
[email protected]
S3_ENABLED=true
S3_BUCKET=files.example.com
AWS_ACCESS_KEY_ID=
AWS_SECRET_ACCESS_KEY=
S3_ALIAS_HOST=files.example.com
IP_RETENTION_PERIOD=31556952
SESSION_RETENTION_PERIOD=31556952
So let's adapt it:
LOCAL_DOMAIN=mastodon.cyberveins.eu # Inserted my own domain
REDIS_HOST=redis # in docker-compose.yml redis' host is redis and not localhost
REDIS_PORT=6379
DB_HOST=/var/run/postgresql
DB_USER=mastodon
DB_NAME=mastodon_production
DB_PASS=c71514d847667a251ec476440700cf8de7f6a975ddfb4b5cd7a01cd427bc4919 # obtained with openssl rand -hex 32
DB_PORT=5432
ES_ENABLED=false # we'll change it later on, if we need it
ES_HOST=es # changed because of docker compose
ES_PORT=9200
ES_USER=elastic
ES_PASS=1b594a97fd5aaa875264bd5a5d987ae202907e2fe0c9a93427fd120b94d3fe96
SECRET_KEY_BASE= # LEAVE EMPTY
OTP_SECRET= # LEAVE EMPTY
VAPID_PRIVATE_KEY= # LEAVE EMPTY
VAPID_PUBLIC_KEY= # LEAVE EMPTY
SMTP_SERVER=smtp-relay.brevo.com # I use Brevo, you can use whatever you like
SMTP_PORT=587
SMTP_LOGIN=my-brevo-login
SMTP_PASSWORD=my-brevo-password
[email protected]
S3_ENABLED=false # I don't use S3
S3_BUCKET=files.example.com
AWS_ACCESS_KEY_ID=
AWS_SECRET_ACCESS_KEY=
S3_ALIAS_HOST=files.example.com
IP_RETENTION_PERIOD=31556952
SESSION_RETENTION_PERIOD=31556952
Now let's proceed by pulling the images and preparing the data directory:
$ sudo docker compose pull
$ sudo mkdir -p public/system
$ sudo chown -R 991:991 public
And let's set it up:
$ sudo docker compose run --rm web bundle exec rake mastodon:setup
WARNING: the setup process will ask again for what we wrote: just confirm/retype the same values. Pay attention to the last part of the setup process: you will be given, on stdout, a FULL copy of the .env.production file, including the generated keys (secret, otp, vapid): copy the whole output and replace .env.production with its content, deleting everything you previously wrote by hand.
Now you can bring everything up with docker compose:
$ sudo docker compose up -d
Time to configure nginx to serve.
$ sudo vim /etc/nginx/sites-available/mastodon.cyberveins.eu.conf
Let's write it:
# Translate the client's Upgrade header into the Connection header we send
# upstream: an empty Upgrade header means a plain request ("close"), anything
# else (i.e. a websocket handshake) keeps "upgrade". Used by the streaming
# location below.
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
# Mastodon web (Rails) — the container publishes port 3000 on localhost.
upstream backend {
server 127.0.0.1:3000 fail_timeout=0;
}
# Mastodon streaming API — the container publishes port 4000 on localhost.
upstream streaming {
server 127.0.0.1:4000 fail_timeout=0;
}
# On-disk cache for proxied responses (used by @proxy below): two-level
# directory hashing, 10 MB shared-memory key zone, entries dropped after
# 7 days without a hit, at most 1 GB on disk.
proxy_cache_path /var/cache/nginx levels=1:2 keys_zone=CACHE:10m inactive=7d max_size=1g;
# Plain-HTTP server: lets ACME http-01 challenges through and redirects
# everything else to HTTPS.
server {
listen 80;
server_name mastodon.cyberveins.eu;
root /opt/live/public;
# certbot's challenge files must be reachable over plain HTTP.
location /.well-known/acme-challenge/ { allow all; }
location / { return 301 https://$host$request_uri; }
}
# HTTPS server: terminates TLS, serves static files from /opt/live/public
# when they exist on disk, and proxies everything else to the Mastodon
# web and streaming containers.
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name mastodon.cyberveins.eu;
# intermediate SSL config (Mozilla Guideline)
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384;
ssl_prefer_server_ciphers off;
# NOTE: these paths are created by certbot when the certificate is issued
# (see the final steps); `nginx -t` will fail until then.
ssl_certificate /etc/letsencrypt/live/mastodon.cyberveins.eu/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/mastodon.cyberveins.eu/privkey.pem;
ssl_session_timeout 1d;
ssl_session_cache shared:MozSSL:10m; # about 40000 sessions
ssl_session_tickets off;
keepalive_timeout 70;
sendfile on;
# Upper bound for media uploads passing through the proxy.
client_max_body_size 80m;
root /opt/live/public;
# Compress text responses; skip ancient MSIE 6.
gzip on;
gzip_disable "msie6";
gzip_vary on;
gzip_proxied any;
gzip_comp_level 6;
gzip_buffers 16 8k;
gzip_http_version 1.1;
gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript image/svg+xml image/x-icon;
# Serve the file from disk when present, otherwise hand off to Rails.
location / {
try_files $uri @proxy;
}
# Because this is a Docker deployment (Rails serves the static files),
# every asset location uses `try_files $uri @proxy;` here instead of
# the bare-metal `try_files $uri =404;`.
location = /sw.js {
add_header Cache-Control "public, max-age=604800, must-revalidate";
add_header Strict-Transport-Security "max-age=63072000; includeSubDomains";
try_files $uri @proxy;
}
# Long-lived (28 days) caching for static asset paths.
location ~ ^/assets/ {
add_header Cache-Control "public, max-age=2419200, must-revalidate";
add_header Strict-Transport-Security "max-age=63072000; includeSubDomains";
try_files $uri @proxy;
}
location ~ ^/avatars/ {
add_header Cache-Control "public, max-age=2419200, must-revalidate";
add_header Strict-Transport-Security "max-age=63072000; includeSubDomains";
try_files $uri @proxy;
}
location ~ ^/emoji/ {
add_header Cache-Control "public, max-age=2419200, must-revalidate";
add_header Strict-Transport-Security "max-age=63072000; includeSubDomains";
try_files $uri @proxy;
}
location ~ ^/headers/ {
add_header Cache-Control "public, max-age=2419200, must-revalidate";
add_header Strict-Transport-Security "max-age=63072000; includeSubDomains";
try_files $uri @proxy;
}
location ~ ^/packs/ {
add_header Cache-Control "public, max-age=2419200, must-revalidate";
add_header Strict-Transport-Security "max-age=63072000; includeSubDomains";
try_files $uri @proxy;
}
location ~ ^/shortcuts/ {
add_header Cache-Control "public, max-age=2419200, must-revalidate";
add_header Strict-Transport-Security "max-age=63072000; includeSubDomains";
try_files $uri @proxy;
}
location ~ ^/sounds/ {
add_header Cache-Control "public, max-age=2419200, must-revalidate";
add_header Strict-Transport-Security "max-age=63072000; includeSubDomains";
try_files $uri @proxy;
}
# Uploaded media: content-addressed, so it can be cached as immutable.
location ~ ^/system/ {
add_header Cache-Control "public, max-age=2419200, immutable";
add_header Strict-Transport-Security "max-age=63072000; includeSubDomains";
try_files $uri @proxy;
}
# Streaming API: websocket upgrade, no buffering so events flow in real time.
location ^~ /api/v1/streaming {
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Proxy "";
proxy_pass http://streaming;
proxy_buffering off;
proxy_redirect off;
proxy_http_version 1.1;
# $connection_upgrade comes from the map block above.
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
add_header Strict-Transport-Security "max-age=63072000; includeSubDomains";
tcp_nodelay on;
}
# Fallback proxy to the Rails app, with response caching (CACHE zone
# defined by proxy_cache_path above).
location @proxy {
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Proxy "";
proxy_pass_header Server;
proxy_pass http://backend;
proxy_buffering on;
proxy_redirect off;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_cache CACHE;
proxy_cache_valid 200 7d;
proxy_cache_valid 410 24h;
# Serve stale cached responses while the backend is down or restarting.
proxy_cache_use_stale error timeout updating http_500 http_502 http_503 http_504;
# Expose cache HIT/MISS status for debugging.
add_header X-Cached $upstream_cache_status;
tcp_nodelay on;
}
error_page 404 500 501 502 503 504 /500.html;
}
After that, let's request a certificate from Let's Encrypt:
$ sudo certbot certonly --nginx --preferred-challenges http -d mastodon.cyberveins.eu
$ sudo ln -sf /etc/nginx/sites-available/mastodon.cyberveins.eu.conf /etc/nginx/sites-enabled/
$ sudo nginx -t
If all went ok:
$ sudo systemctl restart nginx