Compare commits

...

3 Commits

| SHA1 | Message | Date |
| --- | --- | --- |
| c8c413bd75 | 08-04-2025 | 2025-04-08 11:27:00 +02:00 |
| 011cfcba40 | 25-03-2025 | 2025-03-25 15:52:48 +01:00 |
| 259b9c6a24 | Màj 06-04-2024 | 2024-04-06 09:54:09 +02:00 |
137 changed files with 9216 additions and 271 deletions


@@ -0,0 +1,217 @@
# 1Password CLI
```bash
# Login
eval $(op signin)
# Get favorites
op item list --vault "Private" --favorite
# Get a specific item
op item get <ID>
# !! Important: Sign out at the end
op signout
```
```bash
1login() {
eval $(op signin)
}
alias 1signout="op signout"
1search() {
term=$1
if [ -n "$2" ]
then
vault="$2"
else
vault="Private"
fi
echo "Searching for '$term' in vault '$vault'"
op item list --vault "$vault" --long | grep "$term" --ignore-case
}
1get() {
op item get $*
}
```
Sign in to 1Password CLI:
```bash
eval $(op signin)
```
List vaults:
```bash
op vault list
ID NAME
abcdefabcdefabcdefabcdefab Personal
```
List all items in a vault:
```
op item list --vault "Personal"
ID TITLE VAULT EDITED
abcdefabcdefabcdefabcdefab Maxmind Geoip Personal 1 year ago
```
Retrieve a password:
```bash
op item get "OVH - espace client" --fields label=password
[use 'op item get abcdefabcdefabcdefabcdefab --reveal' to reveal]
```
```bash
op item get "OVH - espace client" --fields label=password --format json | jq -r .value
PassW0rd
```
Show all fields of an item:
```bash
op item get "OVH - espace client"
```
Create a new login item:
```bash
op item create --category login \
--title "New Service" \
--vault "Personal" \
username="user@example.com" \
password="mysecret123" \
url="https://example.com"
```
Generate and store a new password:
```bash
op item create --category password \
--title "Generated Password" \
--generate-password
ID: rwtorqqflnsrx6egzdgfmc3ssy
Title: Generated Password
Vault: Personal (abcdefabcdefabcdefabcdefab)
Created: now
Updated: now
Favorite: false
Version: 1
Category: PASSWORD
Fields:
password: [use 'op item get abcdefabcdefabcdefabcdefab --reveal' to reveal]
```
Create a secure note:
```bash
op item create --category "Secure Note" \
--title "Project Notes" \
--vault "Personal" \
'notesPlain[text]="Important project details..."'
ID: wy6lvv2pbs7v46x5pj5d7o2cnm
Title: Project Notes
Vault: Personal (abcdefabcdefabcdefabcdefab)
Created: now
Updated: now
Favorite: false
Version: 1
Category: SECURE_NOTE
Fields:
notesPlain: "Important project details..."
```
Using secrets in environment variables:
```bash
eval $(op signin)
export DB_PASSWORD="$(op item get "Database" --fields label=password)"
export API_KEY="$(op item get "API Keys" --fields label=key)"
```
Read an SSH key:
```bash
op item get "id_rsa" --fields label=private_key
[use 'op item get abcdefabcdefabcdefabcdefab --reveal' to reveal]
```
Using it with an API:
```bash
# Get API token from 1Password and use in API call
export API_TOKEN="$(
op item get "Service API Key" --fields label=password \
--format json | jq -r .value)"
curl https://api.example.com/v1/endpoint \
-H "Content-Type: application/json" \
-H "Authorization: Bearer $API_TOKEN" \
-d '{
"param1": "value1",
"param2": "value2"
}'
```
Use `op inject` for secrets:
```bash
# ~/.zshrc
op inject --in-file "${HOME}/.dotfiles/secrets.zsh" | while read -r line; do
eval "$line"
done
```
```bash
# ~/.dotfiles/secrets.zsh
export NOTION_API_KEY="op://private/notion.so/api-token"
export TEST_PYPI_TOKEN="op://private/test.pypi.org/token"
```
See also https://samedwardes.com/blog/2023-11-28-1password-for-secret-dotfiles-update/
Another method:
```bash
export NOTION_API_KEY=$(op read "op://private/notion.so/api-token")
```
https://nandovieira.com/using-1password-cli-to-avoid-hardcoded-secrets-in-your-terminal-profile
https://blog.gruntwork.io/how-to-securely-store-secrets-in-1password-cli-and-load-them-into-your-zsh-shell-when-needed-dd7a716506c8
https://dev.to/hacksore/using-1password-cli-for-secrets-locally-326e
```bash
docker login -u $(op read op://prod/docker/username) -p $(op read op://prod/docker/password)
```

docs/Divers/GPG.md Normal file

@@ -0,0 +1,27 @@
# GPG Keys
##### List GPG secret keys:
```bash
gpg --list-secret-keys --keyid-format LONG
/Users/yourusername/.gnupg/pubring.kbx
--------------------------------------
sec rsa4096/<key-id> 2021-01-01 [SC]
ABCD1234EFGH5678IJKL91011MNOP1213
uid [ultimate] Your Name <your.email@example.com>
ssb rsa4096/9876ZYXWVUTS5432 2021-01-01 [E]
```
The Key ID is: ABCD1234EFGH5678IJKL91011MNOP1213
##### Display the public key:
```bash
gpg --armor --export ABCD1234EFGH5678IJKL91011MNOP1213
```

docs/Divers/Untitled.md Normal file

@@ -0,0 +1,98 @@
### Pushover
```bash
curl -s \
--form-string "token=APP_TOKEN" \
--form-string "user=USER_KEY" \
--form-string "message=here is an image attachment" \
-F "attachment=@image.jpg" \
https://api.pushover.net/1/messages.json
```
```bash
# Add to .zshrc
function push {
curl -s -F "token=YOUR_TOKEN_HERE" \
-F "user=YOUR_USER_KEY_HERE" \
-F "title=YOUR_TITLE_HERE" \
-F "message=$1" https://api.pushover.net/1/messages.json
}
```
```bash
send_pushover_notification() {
echo -e "Sending Pushover notification ..."
curl -s -F "token=$BASH_APP" \
-F "user=$USER_KEY" \
-F "title=$1" \
-F priority=2 \
-F html=1 \
-F retry=60 \
-F expire=86400 \
-F "message=$2" https://api.pushover.net/1/messages.json
[ $? -eq 0 ] && echo -e "${greenbold}Pushover notification sent successfully !${reset}" || echo -e "${redbold}error sending Pushover notification !${reset}"
}
```
```bash
curl -s -F "token=$BASH_APP" -F "user=$USER_KEY" -F "title=Salut" -F "message=Bien" https://api.pushover.net/1/messages.json
# token: "API token" (create an application)
# user: "Your User Key"
# Message with HTML tags:
curl -s -F "user=$USER_KEY" -F "title=Great Title" -F "html=1" -F "token=$BASH_APP" -F "message='message<b> de</b> test'" https://api.pushover.net/1/messages.json
{"status":1,"request":"4b6b1655-1276-4c7d-932f-7baf0b93e5dc"}%
msg="'This is a <b>HTML</b> test'"
./pushover.sh -a "bash" -m "$msg" -f 1
```
```bash
# push a notification to your phone. can be handy if you're running a
# build and you want to be notified when it's finished.
push() {
curl -s -F "token=PUSHOVER_TOKEN" \
-F "user=PUSHOVER_USER" \
-F "title=terminal" \
-F "message=$1" https://api.pushover.net/1/messages.json > /dev/null 2>&1
}
command_to_run && push "yes! command finished successfully!" || push "awww man! something failed :-("
```
[send the IP of my Raspberry Pi via Pushover](https://gist.github.com/PJUllrich/e95baa0d718e55a6c67f85cd8e53dabe)
### ntfy
```bash
curl -H "X-Priority: 4" -d "yo" https://notif.maboiteverte.fr/pihole
```
### gotify
```bash
send_gotify_notification() {
now=$(date +"%d-%m-%Y %T")
gotify_server="https://gotify.maboiteverte.fr"
TITLE="Pi-hole on $host update"
MESSAGE="**A new version of Pi-hole is available:**\n\n $msg_md\n\n $infos\n\n Please run *pihole -up* on $host to update !"
PRIORITY=8
URL="$gotify_server/message?token=$token_gotify&format=markdown"
echo -e "Sending notification to $gotify_server ..."
curl -s -S --output /dev/null --data '{"message": "'"${MESSAGE}"'", "title": "'"${TITLE}"'", "priority":'"${PRIORITY}"', "extras": {"client::display": {"contentType": "text/markdown"}}}' -H 'Content-Type: application/json' "$URL"
[ $? -eq 0 ] && echo -e "${greenbold}Gotify notification sent successfully !${reset}" || echo -e "${redbold}error sending Gotify notification !${reset}"
}
```


@@ -104,6 +104,12 @@ echo -e "\e[1;34m $dd $dh $dm $ds \e[0m"
END_COMMENT
```
```bash
: '
comment
'
```
#### Create an .m3u playlist:


@@ -0,0 +1,96 @@
```bash
$ curl -w %{certs} https://nextcloud.photos-nas.ovh/ --silent -o /dev/null | grep -Ei "^(start|expire) date:" | head -n 2
Start date:Jul 10 14:55:12 2024 GMT
Expire date:Oct 8 14:55:11 2024 GMT
# Issuer:C = US, O = Let's Encrypt, CN = R10
# X509v3 Subject Alternative Name:DNS:*.photos-nas.ovh, DNS:photos-nas.ovh
```
```bash
$ curl --insecure -vvI https://nextcloud.photos-nas.ovh 2>&1 | grep "expire date" | awk '{print $4,$5,$6,$7,$8,$9}'
Oct 8 14:55:11 2024 GMT
```
```bash
$ nmap -p 443 --script ssl-cert nextcloud.photos-nas.ovh
Starting Nmap 7.95 ( https://nmap.org ) at 2024-07-26 16:34 CEST
Nmap scan report for nextcloud.photos-nas.ovh (192.168.2.57)
Host is up (0.0030s latency).
rDNS record for 192.168.2.57: photos-nas.fr
PORT STATE SERVICE
443/tcp open https
| ssl-cert: Subject: commonName=photos-nas.ovh
| Subject Alternative Name: DNS:*.photos-nas.ovh, DNS:photos-nas.ovh
| Issuer: commonName=R10/organizationName=Let's Encrypt/countryName=US
| Public Key type: rsa
| Public Key bits: 4096
| Signature Algorithm: sha256WithRSAEncryption
| Not valid before: 2024-07-10T14:55:12
| Not valid after: 2024-10-08T14:55:11
| MD5: 2051:3683:b6ef:060e:073b:58c9:ea6d:4c48
|_SHA-1: 4abc:421e:08eb:fd29:8c9e:5ed2:1510:422f:aea4:2100
Nmap done: 1 IP address (1 host up) scanned in 0.13 seconds
```
```bash
$ showcert nextcloud.photos-nas.ovh
IP: 192.168.2.57
Names: photos-nas.ovh *.photos-nas.ovh
notBefore: 2024-07-10 14:55:12 (16 days old)
notAfter: 2024-10-08 14:55:11 (73 days left)
Issuer: C=US O=Let's Encrypt CN=R10
Tags: [CHAIN-VERIFIED]
```
```bash
$ echo | openssl s_client -connect nextcloud.photos-nas.ovh:443 2>/dev/null | openssl x509 -noout -enddate
notAfter=Oct 8 14:55:11 2024 GMT
ssl_expiry () {
echo | openssl s_client -connect ${1}:443 2> /dev/null | openssl x509 -noout -enddate
}
echo | openssl s_client -connect nextcloud.photos-nas.ovh:443 2>/dev/null | openssl x509 -noout -dates -issuer -subject
notBefore=Jul 10 14:55:12 2024 GMT
notAfter=Oct 8 14:55:11 2024 GMT
issuer=C=US, O=Let's Encrypt, CN=R10
subject=CN=photos-nas.ovh
```
```bash
keytool -printcert -sslserver nextcloud.photos-nas.ovh:443
The operation couldn't be completed. Unable to locate a Java Runtime.
Please visit http://www.java.com for information on installing Java.
```
```bash
openssl s_client -showcerts -connect nextcloud.photos-nas.ovh:443
Connecting to 192.168.2.57
CONNECTED(00000005)
depth=2 C=US, O=Internet Security Research Group, CN=ISRG Root X1
verify return:1
depth=1 C=US, O=Let's Encrypt, CN=R10
verify return:1
depth=0 CN=photos-nas.ovh
verify return:1
---
Certificate chain
0 s:CN=photos-nas.ovh
i:C=US, O=Let's Encrypt, CN=R10
a:PKEY: rsaEncryption, 4096 (bit); sigalg: RSA-SHA256
v:NotBefore: Jul 10 14:55:12 2024 GMT; NotAfter: Oct 8 14:55:11 2024 GMT
-----BEGIN CERTIFICATE-----
.../...
```


@@ -0,0 +1,43 @@
# Encryption
### openssl
#### Encrypt a password:
```bash
echo 'rusty!herring.pitshaft' | openssl enc -aes-256-cbc -md sha512 -a -pbkdf2 -iter 100000 -salt -pass pass:'sjkXF*4kX.@9mh-ut8y.'
U2FsdGVkX1/4rGXLZs9q1GJk7lQsE0gLNecO4BzPFmg9YfIyrn7QmdScn9Jb907G
echo 'rusty!herring.pitshaft' | openssl enc -aes-256-cbc -md sha512 -a -pbkdf2 -iter 100000 -salt -pass pass:'sjkXF*4kX.@9mh-ut8y.' > .secret_vault.txt
```
#### Decrypt a password:
```bash
echo "U2FsdGVkX199dZHjA0wtjtt0OapR8EOpVwZ5mPqN3JJd40yhCS3fYYxEflQTXTwr" | openssl enc -aes-256-cbc -md sha512 -a -d -pbkdf2 -iter 100000 -salt -pass pass:'sjkXF*4kX.@9mh-ut8y.'
rusty!herring.pitshaft
cat .secret_vault.txt | openssl enc -aes-256-cbc -md sha512 -a -d -pbkdf2 -iter 100000 -salt -pass pass:'sjkXF*4kX.@9mh-ut8y.'
```
```bash
echo "U2FsdGVkX199dZHjA0wtjtt0OapR8EOpVwZ5mPqN3JJd40yhCS3fYYxEflQTXTwr" | openssl enc -aes-256-cbc -md sha512 -a -d -pbkdf2 -iter 100000 -salt
enter AES-256-CBC decryption password:
rusty!herring.pitshaft
```


@@ -1,4 +1,4 @@
# json
# json (jq)
@@ -79,3 +79,9 @@ $ jq '.english.adjective[1]' file.js
good
```
### Loop
https://unix.stackexchange.com/questions/732602/create-a-json-using-bash-script
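A minimal sketch of looping over jq output in bash, assuming a hypothetical `users.json` containing a JSON array of objects with a `name` field:
```bash
# Emit each array element as compact JSON, one object per line
jq -c '.[]' users.json | while read -r user; do
  name=$(echo "$user" | jq -r '.name')
  echo "User: $name"
done
```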

docs/Divers/bash/loop.md Normal file

@@ -0,0 +1,91 @@
# Loops
### For
```bash
array=('item1' 'item2' 'item3')
for i in "${array[@]}"
do
echo "$i"
done
```
```bash
fruits=('apple' 'banana' 'cherry')
colors=('red' 'yellow' 'red')
for i in "${!fruits[@]}"
do
echo "The ${fruits[$i]} is ${colors[$i]}"
done
```
```bash
numbers=(1 2 3 4 5)
for num in "${numbers[@]}"
do
if ((num % 2 == 0))
then
echo "$num is even"
else
echo "$num is odd"
fi
done
```
```bash
declare -A fruits
fruits=( [apple]='red' [banana]='yellow' [cherry]='red' )
for fruit in "${!fruits[@]}"
do
echo "The $fruit is ${fruits[$fruit]}"
done
```
### While
```bash
fruits=('apple' 'banana' 'cherry')
index=0
while [ $index -lt ${#fruits[@]} ]
do
echo "I like ${fruits[$index]}"
((index++))
done
```
### Until
```bash
numbers=(1 2 3 4 5)
index=0
until [ $index -eq ${#numbers[@]} ]
do
echo "Processing number: ${numbers[$index]}"
((index++))
done
```
### mapfile
```bash
mapfile -t lines < file.txt
for line in "${lines[@]}"
do
echo "Processing line: $line"
done
```


@@ -0,0 +1,91 @@
# Send mail from command line
### ssmtp
Configuration file:
```bash
nano /etc/ssmtp/ssmtp.conf
```
```bash
UseTLS=YES
root=bruno@clicclac.info
mailhub=smtp.ovh.net:465
AuthUser=bruno@clicclac.info
AuthPass=xxxxxxxxxxxxxxx
FromLineOverride=YES
```
Email:
```
From: router@clicclac.info
To: bruno@clicclac.info
Subject: Sent from a terminal!

Your content goes here. Lorem ipsum dolor sit amet, consectetur adipisicing.
(Notice the blank space between the subject and the body.)
```
Send the email:
```bash
ssmtp bruno@clicclac.info < email.txt
```
### msmtp
Configuration file:
```bash
nano ~/.msmtprc
```
```bash
defaults
tls on
tls_starttls off
logfile ~/.msmtp.log
account router
host ssl0.ovh.net
port 465
protocol smtp
from router@clicclac.info
from_full_name DS923
auth on
user router@clicclac.info
#passwordeval gpg --no-tty -q -d ~/.msmtp-password.gpg
password xxxxxxxxxxxxxxxxxxx
account default : router
```
```bash
chmod 600 ~/.msmtprc
```
Email:
```
From: router@clicclac.info
To: bruno@clicclac.info
Subject: Sent from a terminal!

Your content goes here. Lorem ipsum dolor sit amet, consectetur adipisicing.
(Notice the blank space between the subject and the body.)
```
Send the email:
```bash
msmtp -d -t < email.txt
```


@@ -5,18 +5,27 @@
### Store a password or token in the Keychain:
```bash
$ security add-generic-password -s gh_access_tokens -a bruno -w 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
$ security add-generic-password -s gh_access_tokens -a bruno -w github_pat_xxyyzz
```
### Retrieve the password from the Keychain:
```bash
$ security find-generic-password -w -s gh_access_tokens
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
github_pat_xxyyzz
$ gh_access_tokens=$(security find-generic-password -w -s gh_access_tokens)
```
### Delete a password or token from the Keychain:
```bash
$ security delete-generic-password -s gh_access_tokens -a bruno
keychain: "/Users/bruno/Library/Keychains/login.keychain-db"
.../...
password has been deleted.
```
https://scriptingosx.com/2021/04/get-password-from-keychain-in-shell-scripts/


@@ -91,3 +91,23 @@ printf " %-11s %-35b \n" "Width:" "${red}${width}${reset}"
# escapes are of the form \0 or \0NNN
```
Thousands separator ( ' ):
```bash
LC_ALL=en_US.UTF-8 printf "| %-15s | %-4s | %'9.2f | %'9.2f | %'14d \n" "$name" "$symbol" "$price" "$price2" "$volume_24h"
| Bitcoin | BTC | 94,289.45 | 97,788.72 | 50,548,740,838 |
LC_ALL=fr_FR.UTF-8 printf "| %-15s | %-4s | %'9.2f | %'9.2f | %'14d \n" "$name" "$symbol" "$price" "$price2" "$volume_24h"
| Bitcoin | BTC | 94405,00 | 97909,00 | 50580369313 |
printf "| %-15s | %-4s | %'9.2f | %'9.2f | %'14d \n" "$name" "$symbol" "$price" "$price2" "$volume_24h"
| Bitcoin | BTC | 94348.51 | 97837.60 | 50613422301 |
```
https://stackoverflow.com/questions/9374868/number-formatting-in-bash-with-thousand-separator


@@ -4,8 +4,25 @@
### Concatenate strings:
##### The += operator
```bash
upd+="$name "
string="Hello, "
string+="World"
echo "$string"
Hello, World
```
##### Adjacent strings:
```bash
string1="Hello, "
string2="World"
string3="$string1$string2"
echo "$string3"
Hello, World
```
@@ -155,6 +172,42 @@ extension="${filename##*.}"
filename="${filename%.*}"
```
#### Substring between two characters ( )
```bash
a="not_required=('drive' 'files' 'gitea' 'home-assistant' 'homebridge' 'portainer' 'tunes' 'wg' 'www')"
z=$(echo "$a" | awk -F"=" '{split($2, arr, "[()]"); print arr[2]}')
echo "$z"
'drive' 'files' 'gitea' 'home-assistant' 'homebridge' 'portainer' 'tunes' 'wg' 'www'
```
#### Between two "" (quotes)
```bash
z=$(echo "$a" | awk -F'"' '{print $2}')
```
#### Extract a substring by delimiter
```bash
$ string="Portez ce vieux whisky au juge blond qui fume"
# Remove everything up to the first occurrence of ' '
$ echo "${string#* }"
ce vieux whisky au juge blond qui fume
# Remove everything up to the last occurrence of ' '
$ echo "${string##* }"
fume
# Remove everything after the last occurrence of ' '
echo ${string%' '*}
Portez ce vieux whisky au juge blond qui
```
### Substring replacement:
@@ -262,6 +315,20 @@ done <<< "$dependencies"
```
### Multi-line variable -> one line with spaces
```bash
$ c=$(cat ~/.cryptos.yaml | yq '.lots[] | select(.quantity != 0) | .symbol')
BTC
ETH
$ c=$(cat ~/.cryptos.yaml | yq '.lots[] | select(.quantity != 0) | .symbol' | tr '\n' ' ')
BTC ETH
$ c=$(cat ~/.cryptos.yaml | yq '.lots[] | select(.quantity != 0) | .symbol' | tr -d '\n')
BTCETH
```
#### Regex:
@@ -381,6 +448,28 @@ BourgogneBurgundyBourgogneFranche-ComtéCôted'orcotedor
### URL
```bash
$ myurl='http://www.example.com/long/path/to/example/file.ext'
$ echo ${myurl##*/}
file.ext
awk -F / '{print $NF}'
$ echo ${myurl#*//}
www.example.com/long/path/to/example/file.ext
$ echo ${myurl%/*}
http://www.example.com/long/path/to/example
$ echo "$myurl" | grep -Eo '^http[s]?://[^/]+'
http://www.example.com
```
| **Parameter Expansion** | **Description** |
| --------------------------- | ------------------------------------------------------------ |
| ${variable:-value} | If the variable is <u>unset</u> or <u>undefined</u>, expand to **value**. |
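A quick illustration of the default-value expansion above:
```bash
unset name
echo "${name:-anonymous}"    # prints: anonymous
name="bruno"
echo "${name:-anonymous}"    # prints: bruno
```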


@@ -135,8 +135,13 @@ sirop
```bash
$ tableau_indi=()
# Append an element to the end of the array
$ tableau_indi+=('cinq')
$ tableau_indi+=('six')
$ tableau_indi=("new_element" "${tableau_indi[@]}")
# Prepend an element to the beginning of the array
$ tableau_indi=("new_element" "${tableau_indi[@]}")
```
```bash
@@ -260,6 +265,19 @@ $ echo ${#tableau_indi[@]}
3 # 3 elements
```
```
echo ${cryptos[@]}
echo ${#cryptos[@]}
for val in ${!cryptos[@]}
do
echo "index = ${val} , value = ${cryptos[$val]}"
done
```
```bash
$ declare -A tableau_asso=( ['chene']="gland" ['erable']="sirop" ['hetre']="faine" )
@@ -349,6 +367,14 @@ $ if [[ -n "${tableau_asso[erable]}" ]]; then echo "Element is present"; else ec
Element is present
```
```bash
$ not_required=("drive" "files" "gitea" "home-assistant" "homebridge" "portainer" "wg" "yatch")
$ dynhost="pihole"
$ if [[ " ${not_required[*]} " != *"$dynhost"* ]]; then echo "$dynhost est absent"; else echo "$dynhost est present"; fi
pihole est absent
```
### Put the output of a command into an array:
@@ -388,3 +414,54 @@ un deux # zsh: closing brace expected
```
### Append the contents of one array to another array
```bash
$ array1+=(${array2[@]})
```
### Convert a string into an array
```bash
string="drive files gitea home-assistant homebridge portainer tunes wg www"
array=(${string})
echo "${array[@]}"
drive files gitea home-assistant homebridge portainer tunes wg www
```
### Sort an array
```bash
IFS=$'\n' sorted=($(sort <<<"${array[*]}"))
unset IFS
```
```bash
### Sort arrays alphabetically
IFS=$'\n'
NoUpdates=($(sort <<<"${NoUpdates[*]}"))
GotUpdates=($(sort <<<"${GotUpdates[*]}"))
unset IFS
```
### Copy an array
```bash
$ files=("a.txt" "b.txt")
$ filenames=("${files[@]}")
$ echo ${filenames[@]}
a.txt b.txt
```


@@ -9,12 +9,67 @@
https://github.com/eggplants/deepl-cli
```bash
# No API key needed
$ deepl --fr fr --to en -s <<'A'
bonjour
A
Hello
```
#### deepl-cli
https://github.com/kojix2/deepl-cli
##### Translate text
```bash
export DEEPL_AUTH_KEY=your_api_key_here
deepl -i "bonjour" -t EN
Hello
deepl -i "bonjour" -f FR -t EN
Hello
echo "Hello" | deepl -t FR
Bonjour
wp --help | deepl -t FR | less
man git | deepl -t FR | less
deepl --paste -t DE
deepl -t FR foo.txt
```
##### Translate documents
```bash
deepl doc your.pdf -t pt
# The translated document will be saved as your_PT.pdf
```
##### List available languages
```
# Source
deepl -f
# Target
deepl -t
```
##### Environment Variables
| Name | Description |
| ----------------- | -------------------------------- |
| DEEPL_AUTH_KEY | DeepL API authentication key |
| DEEPL_TARGET_LANG | Default target language |
| DEEPL_USER_AGENT | User-Agent |
| EDITOR | Text editor for editing glossary |
#### translate shell


@@ -169,3 +169,19 @@ $ echo "$((5 / 2))"
2
```
##### Special predefined variables:
```bash
$# : number of command-line parameters passed to the script.
$@ : all command-line parameters passed to the script.
$? : exit status of the last process that ran.
$$ : process ID (PID) of the current script.
$USER : username of the user running the script.
$HOSTNAME : hostname of the machine running the script.
$SECONDS : number of seconds the script has been running.
$RANDOM : returns a random number.
$LINENO : returns the current line number in the script.
```
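A minimal demo script (hypothetical `demo.sh`) showing a few of these variables in action:
```bash
#!/usr/bin/env bash
# demo.sh - print a few special variables
echo "Script PID: $$"
echo "Number of arguments: $#"
echo "All arguments: $@"
false
echo "Exit status of the last command: $?"    # -> 1
echo "Running for $SECONDS second(s) on $HOSTNAME as $USER"
```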

docs/Divers/certificat.md Normal file

@@ -0,0 +1,34 @@
# Certificates
```bash
$ echo | openssl s_client -servername maboiteverte.fr -connect maboiteverte.fr:443 2>/dev/null | openssl x509 -noout -issuer -subject -dates
issuer=C = US, O = DigiCert Inc, OU = www.digicert.com, CN = Encryption Everywhere DV TLS CA - G2
subject=CN = *.maboiteverte.fr
notBefore=Jan 8 00:00:00 2024 GMT
notAfter=Jan 27 23:59:59 2025 GMT
```
```bash
$ echo | openssl s_client -servername maboiteverte.fr -connect maboiteverte.fr:443 2>/dev/null | openssl x509 -in /dev/stdin -noout -text
Certificate:
Data:
Version: 3 (0x2)
Serial Number:
06:68:76:d3:54:78:cf:e2:f8:d8:e0:16:89:c1:22:f7
Signature Algorithm: sha256WithRSAEncryption
Issuer: C = US, O = DigiCert Inc, OU = www.digicert.com, CN = Encryption Everywhere DV TLS CA - G2
Validity
Not Before: Jan 8 00:00:00 2024 GMT
Not After : Jan 27 23:59:59 2025 GMT
Subject: CN = *.maboiteverte.fr
Subject Public Key Info:
Public Key Algorithm: rsaEncryption
Public-Key: (2048 bit)
Modulus:
```

docs/Divers/chezmoi.md Normal file

@@ -0,0 +1,213 @@
# chezmoi
https://www.chezmoi.io/reference/
#### *Destination* directory: `~ ($HOME)`
#### Source directory: `~/.local/share/chezmoi`
#### Config file: `~/.config/chezmoi/chezmoi.toml`
#### Choose the default editor:
```bash
export EDITOR="bbedit --wait"
```
or via the chezmoi config file `~/.config/chezmoi/chezmoi.toml`:
```bash
[edit]
command = "bbedit"
args = ["--wait"]
```
#### Edit a file:
1. **chezmoi edit**:
```bash
chezmoi edit $FILE
# Apply the changes when the editor is closed
chezmoi edit --apply $FILE
# Apply the changes when the file is saved
chezmoi edit --watch $FILE
```
2. **chezmoi cd and edit directly in the source directory:**
```bash
chezmoi cd
bbedit ~/.zshrc
code ~/.zshrc
# Show the differences
chezmoi diff
# Apply the changes
chezmoi apply
```
3. **chezmoi edit (without argument):**
```bash
# opens the source directory in the editor
chezmoi edit
```
4. **edit the file in the $HOME directory and re-add it:**
```bash
bbedit ~/.zshrc
code ~/.zshrc
chezmoi add $FILE
# or
chezmoi re-add
```
5. **edit the file in the $HOME directory, then merge your changes with the source state by running chezmoi merge:**
```bash
bbedit ~/.zshrc
code ~/.zshrc
chezmoi merge $FILE
```
##### Encrypt a file:
Add at the top of the config file:
```bash
encryption = "gpg"
[gpg]
recipient = "bruno@clicclac.info"
```
Then
```bash
chezmoi add --encrypt .env
```
##### Automatically commit and push to the repository:
Config file:
```bash
[git]
autoCommit = true
commitMessageTemplate = "{{ promptString \"Commit message\" }}"
autoPush = true
```
##### Archive all dotfiles:
```bash
chezmoi archive
```
##### Pull the latest changes from your repo and apply them:
```bash
chezmoi update
```
equivalent to `git pull --autostash --rebase && chezmoi apply`
##### Pull the latest changes from your repo and review them without applying:
```bash
chezmoi git pull -- --autostash --rebase && chezmoi diff
# if OK
chezmoi apply
```
### Commands:
##### cat-config: display the config file
```bash
chezmoi cat-config
encryption = "gpg"
[gpg]
recipient = "bruno@clicclac.info"
[edit]
command = "codium"
args = ["--wait"]
[git]
autoCommit = true
commitMessageTemplate = "{{ promptString \"Commit message\" }}"
#commitMessageTemplateFile = ".commit_message.tmpl"
autoPush = true
[diff]
pager = "delta"
```
##### edit-config: edit the config file
```bash
chezmoi edit-config
```
##### status: show the state of the files and scripts managed by chezmoi (cf. git status)
```bash
chezmoi status
M .zsh/.zshrc
MM Library/Preferences
M rsync-list.txt
```
##### verify: checks that all targets match their target state.
```bash
chezmoi verify
```
##### update: pulls the changes from the source repo and applies them.
```bash
chezmoi update
```
##### forget: removes targets from the source state, i.e. stops managing them.
```bash
chezmoi forget ~/.bashrc
```
##### destroy: removes the target from the source state, the destination directory, and the state.
```
!!! chezmoi destroy ~/.bashrc
```


@@ -172,6 +172,20 @@ d2b481b91f10 portainer/portainer "/portainer" 9 mon
9763d849e8b1 portainer/portainer "/portainer" 9 months ago Exited (255) 9 months ago 0.0.0.0:9000->9000/tcp affectionate_antonelli
```
**State of a container:**
```bash
$ docker inspect -f '{{.State.Status}}' mycontainer
# created : has never been started since it was created (docker create)
# running : currently running (docker start)
# restarting : restarting (no, on-failure, always, unless-stopped)
# exited : the process inside the container has terminated
# paused : the processes are suspended indefinitely (memory consumption identical to the 'running' state)
# dead : the container is non-functional; it cannot be restarted, only removed.
```
**List all containers:**
```bash
@@ -257,12 +271,24 @@ $ docker container rm $(docker container ls -aq)
$ sudo docker container ls --format 'table {{.ID}}\t{{.Names}}'
CONTAINER ID NAMES
700a2cd00cba lychee
$ sudo docker container exec 700a2cd00cba hostname
700a2cd00cba
$ sudo docker container exec 700a2cd00cba hostname -I
172.18.0.3
```
**Connect to a container as root:**
```bash
$ sudo docker container ls --format 'table {{.ID}}\t{{.Names}}'
CONTAINER ID NAMES
700a2cd00cba lychee
$ sudo docker exec -it 700a2cd00cba bash
```
**Create and start a container:**
```bash
@@ -418,6 +444,12 @@ WARNING! This will remove all custom networks not used by at least one container
Are you sure you want to continue? [y/N] y
```
**See the containers attached to a network:**
```bash
$ docker network inspect --format '{{range $cid,$v := .Containers}}{{printf "%s: %s\n" $cid $v.Name}}{{end}}' "<network_id>"
```
## Stats:


@@ -0,0 +1,83 @@
# Watchtower
#### Full Exclude
Exclude certain containers:
```yaml
# In the containers to ignore:
version: "3"
services:
someimage:
container_name: someimage
labels:
- "com.centurylinklabs.watchtower.enable=false"
```
Include only certain containers:
```yaml
# In watchtower's own docker-compose (environment:)
- WATCHTOWER_LABEL_ENABLE=true
```
```yaml
# In the containers to watch:
version: "3"
services:
someimage:
container_name: someimage
labels:
- "com.centurylinklabs.watchtower.enable=true"
```
#### Monitor only
Per container:
```yaml
version: "3"
services:
someimage:
container_name: someimage
labels:
- "com.centurylinklabs.watchtower.monitor-only=true"
```
All containers:
```yaml
# In watchtower's own docker-compose (environment:)
- WATCHTOWER_MONITOR_ONLY=true
```
https://containrrr.dev/watchtower/
https://www.portainer.io/blog/using-env-files-in-stacks-with-portainer
##### Gotify:
```yaml
-e WATCHTOWER_NOTIFICATIONS=gotify \
-e WATCHTOWER_NOTIFICATION_GOTIFY_URL="https://my.gotify.tld/" \
-e WATCHTOWER_NOTIFICATION_GOTIFY_TOKEN="SuperSecretToken" \
-e WATCHTOWER_NOTIFICATION_GOTIFY_TLS_SKIP_VERIFY=true
```
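For reference, a minimal sketch of running Watchtower itself with label-based filtering and the Gotify variables shown above (the image is containrrr/watchtower; URL and token are placeholders):
```bash
docker run -d \
  --name watchtower \
  -v /var/run/docker.sock:/var/run/docker.sock \
  -e WATCHTOWER_LABEL_ENABLE=true \
  -e WATCHTOWER_NOTIFICATIONS=gotify \
  -e WATCHTOWER_NOTIFICATION_GOTIFY_URL="https://my.gotify.tld/" \
  -e WATCHTOWER_NOTIFICATION_GOTIFY_TOKEN="SuperSecretToken" \
  containrrr/watchtower
```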

docs/Divers/git/Untitled.md Normal file

@@ -0,0 +1,141 @@
# Git: Miscellaneous
#### Find all repos:
```bash
find . -type d -exec [ -e '{}/.git' ] ';' -prune -print
```
#### Gitleaks
```bash
$ gitleaks detect -v
│╲
│ ○
○ ░
░ gitleaks
Finding: APIKEY="P500SaGgEA79l6wktSbakYGPjXfvAkTw0PMXykzKkrxLp"
Secret: P500SaGgEA79l6wktSbakYGPjXfvAkTw0PMXykzKkrxLp
RuleID: generic-api-key
Entropy: 4.631305
File: soco-cli-gui.sh
Line: 49
Commit: 8adf31b467bbe71d9a56b74761c40aeec5ceb171
Author: JohnDoe
Email: john@doe.com
Date: 2023-03-13T07:56:12Z
Fingerprint: 8adf31b467bbe71d9a56b74761c40aeec5ceb171:soco-cli-gui.sh:generic-api-key:49
Finding: APIKEY="P500SaGgEA79l6wktSbakYGPjXfvAkTw0PMXykzKkrxLp"
Secret: P500SaGgEA79l6wktSbakYGPjXfvAkTw0PMXykzKkrxLp
RuleID: generic-api-key
Entropy: 4.631305
File: soco-cli-gui.sh
Line: 1443
Commit: bdc25bc0a3eb61663a6437621f0ac3b7c41701ee
Author: JohnDoe
Email: john@doe.com
Date: 2023-03-11T14:57:24Z
Fingerprint: bdc25bc0a3eb61663a6437621f0ac3b7c41701ee:soco-cli-gui.sh:generic-api-key:1443
Finding: GITHUB_TOKEN=gdxfdo25vth9pvk3mev05s1z87y4t4s
Secret: gdxfdo25vth9pvk3mev05s1z87y4t4s
RuleID: generic-api-key
Entropy: 3.862815
File: soco-cli-gui.sh
Line: 17
Commit: e3fc8b675561b3d608e12a10cc1dd70241dcbc84
Author: JohnDoe
Email: john@doe.com
Date: 2021-01-16T11:04:35Z
Fingerprint: e3fc8b675561b3d608e12a10cc1dd70241dcbc84:soco-cli-gui.sh:generic-api-key:17
3:39PM INF 60 commits scanned.
3:39PM INF scan completed in 36.7ms
3:39PM WRN leaks found: 3
```
Specify the strings to replace in the 'replace.txt' file:
```bash
~/Documents/Scripts/sonos main 15:39:25
$ echo '13314ba0099450eaa6c0b2233d0f6adde1f5c718==>GITHUB_TOKEN' >> replace.txt
```
```bash
~/Documents/Scripts/sonos main* 15:39:43
$ echo 'AIzaSyBtEqykacvWuWiLqq1-eIBZBrJzAYEx_xU==>GM_APIKEY' >> replace.txt
```
Run the replacement (--force is needed since this is not a fresh clone):
```bash
~/Documents/Scripts/sonos main* 15:39:49
$ git filter-repo --replace-text replace.txt
Aborting: Refusing to destructively overwrite repo history since
this does not look like a fresh clone.
(expected one remote, origin)
Please operate on a fresh clone instead. If you want to proceed
anyway, use --force.
```
```bash
~/Documents/Scripts/sonos main* 15:40:13
$ git filter-repo --replace-text replace.txt --force
Parsed 60 commits
New history written in 0.37 seconds; now repacking/cleaning...
Repacking your repo and cleaning out old unneeded objects
HEAD est maintenant à 0279b2d Rework alarms functions
Énumération des objets: 187, fait.
Décompte des objets: 100% (187/187), fait.
Compression par delta en utilisant jusqu'à 8 fils d'exécution
Compression des objets: 100% (111/111), fait.
Écriture des objets: 100% (187/187), fait.
Total 187 (delta 111), réutilisés 87 (delta 74), réutilisés du paquet 0 (depuis 0)
Completely finished after 0.83 seconds.
```
gitleaks no longer detects any leaks:
```bash
~/Documents/Scripts/sonos main* 15:40:22
$ gitleaks detect -v
│╲
│ ○
○ ░
░ gitleaks
3:40PM INF 60 commits scanned.
3:40PM INF scan completed in 29.8ms
3:40PM INF no leaks found
```
Push with --force to the remotes:
```bash
~/Documents/Scripts/sonos main* 15:51:51
$ git push dsm923 main --force
Énumération des objets: 186, fait.
Décompte des objets: 100% (186/186), fait.
Compression par delta en utilisant jusqu'à 8 fils d'exécution
Compression des objets: 100% (73/73), fait.
Écriture des objets: 100% (186/186), 150.65 Kio | 75.32 Mio/s, fait.
Total 186 (delta 111), réutilisés 186 (delta 111), réutilisés du paquet 0 (depuis 0)
remote: Resolving deltas: 100% (111/111), done.
To dsm923e:/volume1/Repositories/repos/sonos.git
+ 56bbfb1...0279b2d main -> main (forced update)
```


@@ -43,7 +43,7 @@ On the server (DS916), go to the Repo folder:
```bash
dsm916> cd /volume1/Repositories/
dsm916> git init -bare wp2012.git
dsm916> git init --bare wp2012.git
```
```bash
@@ -152,7 +152,7 @@ Bash.git is a --bare repo on the NAS.
Create a directory containing the repository data (working tree) as well as a .git directory
```bash
$ git clone ssh://bruno@clicclac.synology.me:42666/volume1/Repositories/bash.git
$ git clone ssh://bruno@photos-nas.ovh:42667/volume1/Repositories/bash.git
$ cd bash
drwxr-xr-x 12 bruno staff 384 May 8 07:46 .git
@@ -571,6 +571,14 @@ mbv git@gitea.maboiteverte.fr:shell/kymsu_mbv.git (fetch)
mbv git@gitea.maboiteverte.fr:shell/kymsu_mbv.git (push)
```
[Push to multiple remotes at once](https://leighmcculloch.com/posts/git-push-to-multiple-remotes-at-once/)
https://gist.github.com/rvl/c3f156e117e22a25f242
[Pushing to multiple git remotes simultaneously](https://jeffkreeftmeijer.com/git-multiple-remotes/)
[Working with Git remotes and pushing to multiple Git repositories](https://jigarius.com/blog/multiple-git-remote-repositories)
### Tags:


@@ -24,12 +24,12 @@ https://docs.gitea.io/en-us/install-from-binary/#recommended-server-configuratio
#### Import a local repo into Gitea:
In Gitea, create a new repository (do not check "Initialize repository"): gitea_bash
In Gitea, create a new repository (do not check "Initialize repository"): wordpress
In the local repo, add the Gitea repository as a new remote:
```bash
$ git remote add gitea https://clicclac.synology.me:3001/bruno/gitea_bash.git
$ git remote add origin https://gitea.photos-nas.ovh/shell/wordpress.git
```
All that remains is to push the commits to the Gitea repository:
@@ -40,3 +40,10 @@ $ git push gitea master
https://charlesreid1.github.io/setting-up-a-self-hosted-github-clone-with-gitea.html#gitea-pushing-local
#### Verify the SSH key:
Settings -> SSH / GPG Keys -> Verify
You have to be very quick, there is a timeout.


@@ -86,3 +86,91 @@ $ curl -s -H "Accept: application/vnd.github.v3+json" https://api.github.com/rep
$ curl -s -H "Accept: application/vnd.github.v3+json" https://api.github.com/repos/go-gitea/gitea/releases/latest | jq -r '.'
```
#### Private repo
On Github.com, go to Settings -> Personal access tokens
Create a token (classic) with repo, admin:org, admin:public_key, admin:repo_hook, admin:org_hook
```bash
$ curl -H 'Authorization: token ghp_xxx' \
-H 'Accept: application/vnd.github.v3.raw' \
-O \
-L https://api.github.com/repos/Bruno21/bashbirds/contents/bashbirds.sh
```
```bash
$ curl https://x-access-token:ghp_xxx@raw.githubusercontent.com/Bruno21/bashbirds/main/bashbirds.sh
```
### GitHub CLI
https://docs.github.com/en/github-cli/github-cli/quickstart
```bash
gh auth login
? What account do you want to log into? GitHub.com
? What is your preferred protocol for Git operations on this host? SSH
? Upload your SSH public key to your GitHub account? /Users/bruno/.ssh/id_rsa.pub
? Title for your SSH key: GitHub CLI
? How would you like to authenticate GitHub CLI? Login with a web browser
! First copy your one-time code: 2A9B-F28C
Press Enter to open github.com in your browser...
✓ Authentication complete.
- gh config set -h github.com git_protocol ssh
✓ Configured git protocol
✓ Uploaded the SSH key to your GitHub account: /Users/bruno/.ssh/id_rsa.pub
✓ Logged in as Bruno21
```
#### Create a repo on Github.com:
```bash
$ gh repo create Bruno21/bashbirds --private
✓ Created repository Bruno21/bashbirds on GitHub
https://github.com/Bruno21/bashbirds
```
#### Create a repo on Github.com (interactive mode):
Push an existing local repository to GitHub
```bash
$ gh repo create
? What would you like to do? Push an existing local repository to GitHub
? Path to local repository .
? Repository name bashbirds
? Description
? Visibility Private
✓ Created repository Bruno21/bashbirds on GitHub
https://github.com/Bruno21/bashbirds
? Add a remote? Yes
? What should the new remote be called? github
✓ Added remote git@github.com:Bruno21/bashbirds.git
? Would you like to push commits from the current branch to "github"? Yes
The authenticity of host 'github.com (140.82.121.4)' can't be established.
ED25519 key fingerprint is SHA256:+DiY3wvvV6TuJJhbpZisF/zLDA0zPMSvHdkr4UvCOqU.
This key is not known by any other names.
Are you sure you want to continue connecting (yes/no/[fingerprint])? yes
Warning: Permanently added 'github.com' (ED25519) to the list of known hosts.
Énumération des objets: 13, fait.
Décompte des objets: 100% (13/13), fait.
Compression par delta en utilisant jusqu'à 8 fils d'exécution
Compression des objets: 100% (13/13), fait.
Écriture des objets: 100% (13/13), 80.34 Kio | 5.36 Mio/s, fait.
Total 13 (delta 5), réutilisés 0 (delta 0), réutilisés du paquet 0 (depuis 0)
remote: Resolving deltas: 100% (5/5), done.
To github.com:Bruno21/bashbirds.git
* [new branch] HEAD -> plus
la branche 'plus' est paramétrée pour suivre 'github/plus'.
✓ Pushed commits to git@github.com:Bruno21/bashbirds.git
```

docs/Divers/pentest.md Normal file

@@ -0,0 +1,59 @@
# pentest
### Tools
- Kali Linux is a Linux distribution that bundles all the tools needed to run penetration tests. More than 300 security tools are available to the pentester to check how vulnerable a system is to an attack. All our favorite tools are of course included in this distribution, which is why we recommend it and use it daily.
- [nmap](https://nmap.org/) is a port scanner. It analyzes a remote server and determines which ports are open. It can also gather information about the operating system in use and the services hosted.
- [Metasploit](https://www.metasploit.com/) is built for penetration testing. It provides tools to detect vulnerabilities, gather as much information as possible about them, and help exploit them. A development framework is available to configure and use "exploits" (pieces of code that leverage a known vulnerability in a system or piece of software) to try to break into a target system.
- Wireshark
- John The Ripper
- Hashcat
- [Hydra](https://github.com/vanhauser-thc/thc-hydra) is a very effective tool for brute-force attacks. It supports many protocols and tries every possible password combination to gain access to a system.
- Burp Suite
- [Zed Attack Proxy (ZAP)](https://www.zaproxy.org) is an open-source tool for penetration testing of web applications. It sits between the web application and the tester's browser, then intercepts and inspects all exchanges between the browser and the application. It can also modify the content of certain messages before forwarding them to their recipient and check how the application behaves.
- [sqlmap](https://sqlmap.org/) is an open-source tool whose goal is to automate SQL injection attacks against web applications. It identifies how vulnerable an application or website is to an SQL injection attack.
- aircrack-ng
- Dirb
- [Nikto](https://cirt.net/nikto2) is an open-source vulnerability scanner aimed specifically at web servers. Among other things, it checks the versions of the software and modules used on the server, and also scans the directory tree for sensitive information.
- dirsearch
- [SSLScan](https://sourceforge.net/projects/sslscan/) is a lightweight, easy-to-use tool for scanning the traffic exchanged with an SSL service (such as https, which secures a large number of websites). It provides information about the ciphers and certificates used.
- [Social Engineer Toolkit (SET)](https://www.social-engineer.org/framework/se-tools/computer-based/social-engineer-toolkit-set/) is designed specifically for social engineering. It is fully configurable and, among other things, makes it easy to create phishing emails as part of a pentest.
- [theHarvester](https://pypi.org/project/theHarvester/) is also a penetration-testing tool. It gathers a large amount of information about a company's information system, such as server names, domain names, email accounts, open ports, and even employee names. To do so, it relies on many publicly accessible sources such as search engines.
- [OpenVas](https://www.openvas.org/) stands for "Open Vulnerability Assessment Scanner". As the name suggests, it is a vulnerability scanner. It can scan any device with an IP address connected to the network: workstation, server, router, firewall, smartphone, website, connected object, IP phone, etc.
The software maps potential targets on the network, detects reachable ports, and identifies the active services and their versions. Based on this information, the various elements are scanned for vulnerabilities and a report is generated.
https://www.lemondeinformatique.fr/actualites/lire-11-outils-pour-s-initier-au-pentest-80103.html
- [SearchSploit](https://www.exploit-db.com/searchsploit)
- [metasploit](https://www.metasploit.com)
OWASP


@@ -144,7 +144,7 @@ If WebTools no longer works, it has to be [reset](https://github.com/ukdtom/
guide.xml 100% 2792KB 92.8KB/s 00:30
```
### Logs:
@@ -156,6 +156,22 @@ For plug-in logs, go directly to the server:
### Unable to index the contents of a folder:
1. In DSM go to "Control Panel > Shared Folder".
2. Select the media folder you want Plex to have access to.
3. Click Edit.
4. Click on the Permissions tab.
5. In the dropdown change "Local users" to "System internal user".
6. Find and select the PlexMediaServer user (or Plex user for DSM 6).
7. Tick the Read/Write permission box and click Save (or OK for DSM 6).
### Links:

docs/Divers/rclone.md Normal file

@@ -0,0 +1,551 @@
# rclone
### Mac -> pcloud
https://rclone.org/pcloud/
##### rclone config show:
```bash
rclone config show
[pcloud]
type = pcloud
hostname = eapi.pcloud.com
token = {"access_token":"zj6mZDnONSzyJXpmZpIYvXkZWhYVxcv5zNzbjHhUTF3FvzEuPkxy","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}
root_folder_id = 16135343175
[pcloud2]
type = pcloud
hostname = eapi.pcloud.com
token = {"access_token":"zj6mZDnONSzyJXpmZ28jB7kZgGoK40Ez6LVEVPUezEmUYXmKDzg7","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}
```
##### Copy a file to pcloud:
```bash
# rclone copy <file> <remote:path>
~/.config/rclone
rclone copy rclone.conf pcloud:
rclone copy -vv pcloud:topgrade.toml .
```
##### List the files on pcloud:
```bash
rclone ls pcloud:
400 rclone.conf
```
##### root_folder_id: 16135343175
The directory that rclone treats as the root of the pCloud drive.
```bash
https://my.pcloud.com/#page=filemanager&folder=16135343175
```
### Mac -> Synology
https://rclone.org/sftp/
##### rclone config show
```bash
rclone config show
[ds923]
type = sftp
host = photos-nas.ovh
port = 42667
use_insecure_cipher = true
shell_type = unix
md5sum_command = /bin/md5sum
sha1sum_command = /bin/sha1sum
```
##### rclone lsf
```bash
rclone lsf ds923:/home
#recycle/
.bash_aliases
.bash_history
.bash_logout
.bashrc
.cache/
.config/
```
```bash
> rclone lsf ds923:/Films
#recycle/
11.6 (2013).m4v
12 years a slave.m4v
```
```bash
> rclone lsf ds923:/web
.well-known/
1-login.php
Locale/
_index.html
admin/
adminer/
```
##### rclone copy
```bash
~
rclone copy security.txt -vv ds923:/homes/bruno --sftp-path-override=/volume1/homes/bruno
# -vv verbose
rclone copy -vv ds923:/homes/bruno/security.txt --sftp-path-override=/volume1/homes/bruno .
```
### Mac -> iCloud
https://rclone.org/iclouddrive/
```
[iclouddrive]
type = iclouddrive
apple_id = bxxxxxxxxxxxx@orange.fr
password = Passw0rd4
cookies = X-APPLE-WEBAUTH-HSA-TRUST=2422434b85789793...
trust_token = HSARMTKNSRVXWFlaTNRjqj9rT3DEMu9UFBfRYvmzbJ3B2pWGryz46M....
```
```bash
rclone config reconnect iclouddrive:
```
The `Missing PCS cookies from the request` error is caused by Advanced Data Protection (iCloud settings). To use rclone, you have to disable it.
### Mac -> Seafile
https://rclone.org/seafile/
```bash
[seafile923]
type = seafile
url = https://seafile.photos-nas.ovh
user = liste@blabla.info
pass =
2fa = true
library = Ma bibliothèque
auth_token = 71892276ff3cbd92ef86951c9b0939a4b7213286
```
```bash
rclone copy security.txt -vv seafile923:
rclone copy -vv seafile923:security.txt .
```
### Mac -> sur-le-sentier.fr
https://rclone.org/sftp/
```bash
rclone config show
[sls]
type = sftp
host = sur-le-sentier.fr
user = sentier
use_insecure_cipher = false
shell_type = unix
md5sum_command = md5sum
sha1sum_command = sha1sum
```
```bash
rclone copy security.txt sls:/var/www/vhosts/sur-le-sentier.fr
rclone copy sls:/var/www/vhosts/sur-le-sentier.fr/security.txt .
```
### Mac -> maboiteverte.fr
https://rclone.org/sftp/
```bash
rclone config show
[mbv]
type = sftp
host = maboiteverte.fr
use_insecure_cipher = false
shell_type = unix
md5sum_command = md5sum
sha1sum_command = sha1sum
```
```bash
rclone copy security.txt -vv mbv:/var/www/vhosts/maboiteverte.fr
rclone copy mbv:/var/www/vhosts/maboiteverte.fr/security.txt .
```
### Mac -> ovh
https://rclone.org/sftp/
```bash
rclone config show
[ovh]
type = sftp
host = ftp.cluster011.ovh.net
user = funnymac
shell_type = unix
md5sum_command = md5sum
sha1sum_command = sha1sum
```
```bash
rclone copy security.txt -vv ovh:/homez.528/funnymac
rclone copy ovh:/homez.528/funnymac/security.txt .
```
### Synology -> pCloud
https://github.com/ravem/synology-pcloud-and-rclone
### Commands:
#### rclone about
Shows the server info:
```
rclone about sls:
Total: 115.535 GiB
Used: 36.288 GiB
Free: 79.248 GiB
# Options:
--full  shows the values in bytes
--json  outputs the result as json
```
#### rclone authorize
To authorize a remote or headless rclone from a machine with a browser, follow the rclone config instructions.
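A minimal sketch, using pcloud (one of the OAuth remotes configured above) as the backend name:
```bash
# On the machine that has a browser
rclone authorize "pcloud"
# Paste the token it prints into 'rclone config' on the headless machine
```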
#### rclone backend
This command runs a backend-specific command.
```bash
rclone backend help sls:
2025/04/03 08:30:43 NOTICE: Failed to backend: sftp backend has no commands
rclone backend help pcloud:
2025/04/03 08:31:56 NOTICE: Failed to backend: pcloud backend has no commands
```
#### rclone bisync
Performs a bidirectional synchronization between two paths.
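A minimal sketch with a hypothetical local folder and the pcloud remote defined above:
```bash
# The first run must build the baseline listings with --resync
rclone bisync ~/Documents/notes pcloud:notes --resync
# Later runs keep both sides in sync
rclone bisync ~/Documents/notes pcloud:notes
```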
#### rclone cat
Sends the files to standard output.
```bash
rclone cat mbv:/var/www/vhosts/maboiteverte.fr/.bashrc
```
#### rclone check
Checks that the files in the source and the destination match.
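A quick sketch against the pcloud remote (paths are hypothetical); nothing is transferred, only sizes and hashes are compared:
```bash
rclone check ~/.config/rclone pcloud:rclone
# Add --size-only if the remote does not support hashes
```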
#### rclone checksum
Checks the files in the destination against a SUM file.
#### rclone cleanup
Cleans up the destination if possible: empties the trash or deletes old file versions. Not supported by all destinations.
#### rclone config
Starts an interactive configuration session to create new remotes and manage existing ones.
#### rclone copy
Copies files from the source to the destination, skipping identical files.
#### rclone copyto
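No notes here yet; as a reminder, copyto behaves like copy but lets you name the destination file. A sketch reusing the sls remote (paths are hypothetical):
```bash
rclone copyto security.txt sls:/var/www/vhosts/sur-le-sentier.fr/security-copy.txt
```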
#### rclone copyurl
Copies the content of the given URL to dest:path.
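A quick sketch (URL and destination are placeholders); `-a` derives the file name from the URL:
```bash
rclone copyurl -a https://example.com/robots.txt pcloud:downloads/
```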
#### rclone cryptcheck
Cryptcheck checks the integrity of an encrypted remote.
#### rclone cryptdecode
Cryptdecode returns the unencrypted file names.
#### rclone dedupe
Interactively finds duplicate file names and deletes/renames them.
#### rclone delete
Deletes the files in the path. Unlike purge, it obeys the include/exclude filters and can therefore be used to delete files selectively.
```bash
# Files > 100M
rclone --min-size 100M lsl sls:/var/www/vhosts/sur-le-sentier.fr
# Dry run
rclone --dry-run --min-size 100M delete sls:/var/www/vhosts/sur-le-sentier.fr
# Interactive deletion
rclone --interactive --min-size 100M delete sls:/var/www/vhosts/sur-le-sentier.fr
```
#### rclone deletefile
Deletes a single file from a remote. Unlike delete, it cannot be used to remove a directory and does not obey the include/exclude filters.
```bash
rclone --interactive deletefile sls:/var/www/vhosts/sur-le-sentier.fr/.bashrc
```
#### rclone gendocs
Generates the markdown documentation for rclone in the given directory.
```bash
rclone gendocs rclone
```
#### rclone hashsum
Generates a hash file (md5, sha1, whirlpool, crc32, sha256) for all objects in the directory.
```
md5, sha1, whirlpool, crc32, sha256
```
```bash
rclone hashsum MD5 sls:/var/www/vhosts/sur-le-sentier.fr/logs/
93193fb044ccc71cb27203532ac334b8 error_log
d41d8cd98f00b204e9800998ecf8427e access_log
73abaaade6cd1c0a40162014867261f9 proxy_error_log
0801b1b10ac5ff6adb9e7bc8fe3647ad access_ssl_log
```
#### rclone link
Generates a public link to the file/folder.
```bash
rclone link sls:/var/www/vhosts/sur-le-sentier.fr/logs/
2025/04/03 11:04:55 NOTICE: Failed to link: sftp://sentier@sur-le-sentier.fr:22//var/www/vhosts/sur-le-sentier.fr/logs/ doesn't support public links
rclone link pcloud:
https://e.pcloud.link/publink/show?code=kZyR1dZalrSWYGJiKYObHx04EzVw8U4hxik
```
#### rclone listremotes
Lists all the remotes defined in the configuration file and in environment variables.
```bash
rclone listremotes
pcloud:
pcloud2:
ds923:
iclouddrive:
seafile923:
sls:
mbv:
ovh:
```
#### rclone md5sum
Produces an md5sum file for all objects in the path.
Equivalent to `rclone hashsum MD5 remote:path`
#### rclone mkdir
Creates the path if it does not already exist.
```bash
rclone mkdir sls:/var/www/vhosts/sur-le-sentier.fr/mkdir/
```
#### rclone mount
Rclone mount lets Linux, FreeBSD, macOS and Windows mount any of rclone's cloud storage systems as a file system with FUSE.
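A minimal sketch with the pcloud remote (mount point is hypothetical; on macOS this requires macFUSE):
```bash
mkdir -p ~/mnt/pcloud
rclone mount pcloud: ~/mnt/pcloud --vfs-cache-mode writes &
# Unmount with: umount ~/mnt/pcloud  (fusermount -u on Linux)
```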
#### rclone move
Moves the contents of the source directory to the destination directory.
```bash
rclone move rclone/ sls:/var/www/vhosts/sur-le-sentier.fr/rclone/
# Delete empty source directories
rclone move --delete-empty-src-dirs rclone/ sls:/var/www/vhosts/sur-le-sentier.fr/rclone/
```
#### rclone moveto
Moves a file or directory from the source to the destination.
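A quick sketch of moving and renaming a single file, reusing the sls remote (paths are hypothetical):
```bash
rclone moveto security.txt sls:/var/www/vhosts/sur-le-sentier.fr/security-renamed.txt
```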
#### rclone ncdu
Explores a remote with a text-based user interface.
```bash
rclone ncdu sls:/var/www/vhosts/sur-le-sentier.fr/
```
#### rclone purge
Removes the path and all of its contents.
```bash
# Deletes the rclone folder
rclone purge sls:/var/www/vhosts/sur-le-sentier.fr/rclone/
```
#### rclone rcat
Copies standard input to a remote file.
```bash
echo "Hello" | rclone rcat sls:/var/www/vhosts/sur-le-sentier.fr/test_rcat
```
#### rclone rmdir
Removes the empty directory at the path.
```bash
rclone rmdir sls:/var/www/vhosts/sur-le-sentier.fr/empty_folder
```
#### rclone rmdirs
Removes the empty directories under the path.
```bash
# on sls
mkdir -p empty_folder/empty_folder2
rclone rmdirs sls:/var/www/vhosts/sur-le-sentier.fr/empty_folder/
```
#### rclone sha1sum
Produces a sha1sum file for all objects in the path.
Equivalent to `rclone hashsum SHA1 remote:path`
#### rclone size
Shows the total size and the number of objects in the remote directory remote:path.
```bash
rclone size --max-depth 1 sls:/var/www/vhosts/sur-le-sentier.fr/
Total objects: 14
Total size: 13.468 MiB (14121727 Byte)
```
#### rclone sync
Makes the source and the destination identical, modifying only the destination.
```bash
rclone sync --interactive rclone sls:/var/www/vhosts/sur-le-sentier.fr/rclone/
```
#### rclone tree
Lists the contents of a remote in the same way as the unix tree command.
```bash
rclone tree sls:/var/www/vhosts/sur-le-sentier.fr/httpdocs/
/
├── Locale
│ ├── de_DE
│ │ └── LC_MESSAGES
│ │ ├── sentier.mo
│ │ └── sentier.po
│ ├── en_US
│ │ └── LC_MESSAGES
│ │ ├── sentier.mo
│ │ └── sentier.po
│ ├── es_ES
│ │ └── LC_MESSAGES
│ │ ├── sentier.mo
```
#### rclone version
Shows the version number.
```bash
rclone version --check
yours: 1.69.1
latest: 1.69.1 (released 2025-02-14)
beta: 1.70.0-beta.8641.839eef0db (released 2025-03-26)
upgrade: https://beta.rclone.org/v1.70.0-beta.8641.839eef0db
rclone version
rclone v1.69.1
- os/version: darwin 15.3.2 (64 bit)
- os/kernel: 24.3.0 (arm64)
- os/type: darwin
- os/arch: arm64 (ARMv8 compatible)
- go/version: go1.24.0
- go/linking: dynamic
- go/tags: none
```


@@ -0,0 +1,30 @@
# Asus ZenWifi Pro ET12
#### Turn the LEDs on/off
AiMesh -> Topology -> Home -> Management -> LED On/Off button
Introduction to the firewall on the ASUS router:
https://www.asus.com/fr/support/FAQ/1013630/
How to configure the network services filter
https://www.asus.com/fr/support/FAQ/1013636
How to make my router more secure
https://www.asus.com/fr/support/FAQ/1039292


@@ -24,6 +24,24 @@ xigmanas.home (192.168.1.250) at 1c:fd:8:70:20:f7 on en0 ifscope [ethernet]
broadcasthost (255.255.255.255) at ff:ff:ff:ff:ff:ff on en0 ifscope [ethernet]
```
### Find the devices on a network:
https://www.blackmoreops.com/2015/12/31/use-arp-scan-to-find-hidden-devices-in-your-network/
https://github.com/royhills/arp-scan
https://github.com/royhills/arp-scan/wiki/arp-scan-User-Guide
```bash
$ sudo arp-scan --interface=en0 --localnet
Interface: en0, type: EN10MB, MAC: 50:ed:3c:1f:98:06, IPv4: 192.168.129.54
Starting arp-scan 1.10.0 with 256 hosts (https://github.com/royhills/arp-scan)
523 packets received by filter, 0 packets dropped by kernel
Ending arp-scan 1.10.0: 256 hosts scanned in 1.880 seconds (136.17 hosts/sec). 0 responded
```
### nmap:


@@ -1,4 +1,4 @@
# Asus
# Asus RT-AC88U @ Merlin
@@ -183,6 +183,20 @@ sh /jffs/scripts/firewall uninstall
#### Factory reset
1. Power off the router.
2. Press and hold the "WPS" button, then power on the router.
3. The power LED turns on **(keep holding the WPS button)**.
4. Release the **WPS button** when the power LED starts to blink.
5. The power LED stops blinking and the router reboots automatically.
https://www.asus.com/fr/support/FAQ/1039077
Login / password: admin / admin
#### AsusWRT-Merlin
[Forum](https://www.snbforums.com/forums/asuswrt-merlin.42/)
@@ -206,3 +220,6 @@ https://www.snbforums.com/threads/how-to-use-dnsmasq-conf-add.8785/
https://www.henriksommerfeld.se/firmware-update-notifications-for-my-asus-router/


@@ -0,0 +1,187 @@
# Asus RT-AC88U @ openWrt
#### Factory reset
1. Power off the router.
2. Press and hold the "WPS" button, then power on the router.
3. The power LED turns on **(keep holding the WPS button)**.
4. Release the **WPS button** when the power LED starts to blink.
5. The power LED stops blinking and the router reboots automatically.
https://www.asus.com/fr/support/FAQ/1039077
Login / password: admin / admin
#### Copy the firmware to the router:
Disable the macOS firewall.
```bash
~/Downloads 3m 59s 18:21:52
tftp 192.168.1.1
tftp> mode binary
tftp> put openwrt-23.05.2-bcm53xx-generic-asus_rt-ac88u-squashfs.trx
Sent 9568256 bytes in 28.7 seconds
```
#### Connecting to OpenWrt:
```bash
root@192.168.1.1's password:
BusyBox v1.36.1 (2023-11-14 13:38:11 UTC) built-in shell (ash)
_______ ________ __
| |.-----.-----.-----.| | | |.----.| |_
| - || _ | -__| || | | || _|| _|
|_______|| __|_____|__|__||________||__| |____|
|__| W I R E L E S S F R E E D O M
-----------------------------------------------------
OpenWrt 23.05.2, r23630-842932a63d
-----------------------------------------------------
```
#### opkg
```bash
root@OpenWrt:~# opkg update
Downloading https://downloads.openwrt.org/releases/23.05.2/targets/bcm53xx/generic/packages/Packages.gz
Updated list of available packages in /var/opkg-lists/openwrt_core
root@OpenWrt:~# opkg find '*nano*'
nano - 7.2-2 - Nano is a small and simple text editor for use on the terminal.
root@OpenWrt:~# opkg install nano
Installing nano (7.2-2) to root...
```
#### Update
```bash
# opkg update && opkg install brcmfmac-firmware-4366b1-pcie
```
#### Infos
```bash
# uname -a
Linux OpenWrt 5.15.137 #0 SMP Tue Nov 14 13:38:11 2023 armv7l GNU/Linux
```
```bash
# cat /etc/os-release
NAME="OpenWrt"
VERSION="23.05.2"
ID="openwrt"
ID_LIKE="lede openwrt"
PRETTY_NAME="OpenWrt 23.05.2"
VERSION_ID="23.05.2"
HOME_URL="https://openwrt.org/"
BUG_URL="https://bugs.openwrt.org/"
SUPPORT_URL="https://forum.openwrt.org/"
BUILD_ID="r23630-842932a63d"
OPENWRT_BOARD="bcm53xx/generic"
OPENWRT_ARCH="arm_cortex-a9"
OPENWRT_TAINTS=""
OPENWRT_DEVICE_MANUFACTURER="OpenWrt"
OPENWRT_DEVICE_MANUFACTURER_URL="https://openwrt.org/"
OPENWRT_DEVICE_PRODUCT="Generic"
OPENWRT_DEVICE_REVISION="v0"
OPENWRT_RELEASE="OpenWrt 23.05.2 r23630-842932a63d"
```
```bash
# cat /proc/cpuinfo
processor : 0
model name : ARMv7 Processor rev 0 (v7l)
BogoMIPS : 1400.00
Features : half thumb fastmult edsp tls
CPU implementer : 0x41
CPU architecture: 7
CPU variant : 0x3
CPU part : 0xc09
CPU revision : 0
processor : 1
model name : ARMv7 Processor rev 0 (v7l)
BogoMIPS : 1400.00
Features : half thumb fastmult edsp tls
CPU implementer : 0x41
CPU architecture: 7
CPU variant : 0x3
CPU part : 0xc09
CPU revision : 0
Hardware : BCM5301X
Revision : 0000
Serial : 0000000000000000
```
### Liens
#### OpenWrt:
- [Table of Hardware](https://openwrt.org/toh/start)
- [Table of Hardware: devices with 16/128MB or more](https://openwrt.org/toh/views/toh_available_16128)
- [OpenWrt Support for Asus RT-AC88U](https://forum.openwrt.org/t/openwrt-support-for-asus-rt-ac88u/78635)
- [OpenWrt Wiki](https://openwrt.org/toh/asus/rt-ac88u)
- [Quick start guide for OpenWrt installation](https://openwrt.org/docs/guide-quick-start/start)
- [OpenWrt Factory Install](https://openwrt.org/docs/guide-quick-start/factory_installation)
#### Firmwares:
- [OpenWrt Firmware Selector](https://firmware-selector.openwrt.org/?version=23.05.2&target=bcm53xx%2Fgeneric&id=asus_rt-ac88u)
- [Archive Firmwares](https://archive.openwrt.org/releases/23.05.2/targets/bcm53xx/generic/)
#### TFTP:
- [Installing openWrt via TFTP](https://openwrt.org/docs/guide-user/installation/generic.flashing.tftp)
- [Setting up a TFTP server for TFTP Recovery/Install](https://openwrt.org/docs/guide-user/troubleshooting/tftpserver)
- [Run a TFTP Server for Network Device Setups](https://rick.cogley.info/post/run-a-tftp-server-on-mac-osx/)
- [TFTP Server for macOS](https://macandegg.com/2022/02/tftp-server-for-macos/)
#### Livebox:
- [[La fibre.info] Remplacement de la Livebox par un routeur Openwrt](https://lafibre.info/remplacer-livebox/remplacement-de-la-livebox-par-un-routeur-openwrt-18-dhcp-v4v6-tv/)
- [[Github] Remplacer une Livebox par un routeur OpenWrt (ipv4,ipv6 & TV)](https://github.com/ubune/openwrt-livebox)
- [[La fibre.info] Index des solutions de remplacement de la Livebox](https://lafibre.info/remplacer-livebox/index-des-solutions-de-remplacement-de-la-livebox/)
#### Divers:
- [SNBForums (Asus)](https://www.snbforums.com/forums/asus-wireless.37/)
- [[La fibre.info] OpenWrt](https://lafibre.info/openwrt/)

View File

@@ -8,6 +8,30 @@
$ brew install wp-cli
```
Comme wp-cli n'est pas à jour sur Homebrew, on l'installe directement:
```bash
$ curl -O https://raw.githubusercontent.com/wp-cli/builds/gh-pages/phar/wp-cli.phar
$ chmod +x wp-cli.phar
$ mv wp-cli.phar $HOME/.local/bin/wp
```
```bash
$ brew install wp-cli-completion
# => /opt/homebrew/Cellar/wp-cli-completion/2.11.0/etc/bash_completion.d/wp
# Bash: /opt/homebrew/etc/bash_completion.d
# Zsh: /opt/homebrew/share/zsh-completions
# Bash: /opt/homebrew/share/bash-completion/completion
[[ -r $(brew --prefix)/Cellar/wp-cli-completion/2.11.0/etc/bash_completion.d/wp ]] && source $(brew --prefix)/Cellar/wp-cli-completion/2.11.0/etc/bash_completion.d/wp
```
#### Installer le script [**wp-cli bash completion**](https://github.com/wp-cli/wp-cli/raw/master/utils/wp-completion.bash)
@@ -135,3 +159,40 @@ WP-CLI project config:
WP-CLI version: 2.0.0
```
### 4. Installer wp-cli sur sur-le-sentier.fr
```bash
#!/usr/bin/env bash
# Télécharger
curl -O https://raw.githubusercontent.com/wp-cli/builds/gh-pages/phar/wp-cli.phar
# Test
php wp-cli.phar --info
chmod +x wp-cli.phar
sudo mv wp-cli.phar wp
# Test
./wp --info
# Ajouter dans .bash_aliases
alias wp='$HOME/wp'
# Installer les TAB completions
curl -O https://raw.githubusercontent.com/wp-cli/wp-cli/v2.6.0/utils/wp-completion.bash
echo "source ~/wp-completion.bash" >> .bashrc
```
```bash
# Mise-à-jour
wp cli update --nightly
wp cli update
```
https://make.wordpress.org/cli/handbook/references/shell-friends/

View File

@@ -2,7 +2,7 @@
wp super-cache
#### wp super-cache
Cette commande nécessite l'installation du package <u>wp-cli/wp-super-cache-cli</u>
@@ -40,13 +40,49 @@ SUBCOMMANDS
Lancer le cron immédiatement:
#### wp doctor
Installer le plugin wp-cli/doctor
```bash
wp package install wp-cli/doctor-command
```
```bash
$ wp doctor check --all
+----------------------------+---------+--------------------------------------------------------------------+
| name | status | message |
+----------------------------+---------+--------------------------------------------------------------------+
| core-verify-checksums | success | WordPress verifies against its checksums. |
| file-eval | success | All 'php' files passed check for 'eval\(.*base64_decode\(.*'. |
| cache-flush | success | Use of wp_cache_flush() not found. |
| autoload-options-size | success | Autoloaded options size (NAN) is less than threshold (900kb). |
| constant-savequeries-falsy | success | Constant 'SAVEQUERIES' is undefined. |
| constant-wp-debug-falsy | error | Constant 'WP_DEBUG' is defined 'true' but expected to be falsy. |
| core-update | success | WordPress is at the latest version. |
| cron-count | success | Total number of cron jobs is within normal operating expectations. |
| cron-duplicates | success | All cron job counts are within normal operating expectations. |
| option-blog-public | error | Site is private but expected to be public. |
| plugin-active-count | success | Number of active plugins (0) is less than threshold (80). |
| plugin-deactivated | warning | Greater than 40 percent of plugins are deactivated. |
| plugin-update | warning | 1 plugin has an update available. |
| theme-update | success | Themes are up to date. |
| php-in-upload | success | No PHP files found in the Uploads folder. |
| language-update | success | Languages are up to date. |
+----------------------------+---------+--------------------------------------------------------------------+
Error: 2 checks report 'error'.
```
#### Lancer le cron immédiatement:
```bash
$ wp cron event run --due-now
```
Liste des crons:
#### Liste des crons:
```bash
$ wp cron event list
@@ -68,7 +104,7 @@ $ wp cron event list
Changer l'URL de WordPress:
#### Changer l'URL de WordPress:
```bash
$ wp option update home 'http://example.com'
@@ -77,13 +113,13 @@ $ wp option update siteurl 'http://example.com'
Supprimer les transients expirés:
#### Supprimer les transients expirés:
```bash
$ wp transient delete --expired
```
Vider le cache:
#### Vider le cache:
```bash
$ wp cache flush

View File

@@ -0,0 +1,28 @@
# wp maintenance-mode
Aide:
```bash
$ wp help maintenance-mode
```
```bash
NAME
wp maintenance-mode
DESCRIPTION
Activates, deactivates or checks the status of the maintenance mode of a site.
SYNOPSIS
wp maintenance-mode <command>
SUBCOMMANDS
activate Activates maintenance mode.
deactivate Deactivates maintenance mode.
is-active Detects maintenance mode status.
status Displays maintenance mode status.
```

View File

@@ -0,0 +1,41 @@
# wp option
Aide:
```bash
wp help option
```
```bash
wp option
DESCRIPTION
Retrieves and sets site options, including plugin and WordPress settings.
SYNOPSIS
wp option <command>
SUBCOMMANDS
add Adds a new option value.
delete Deletes an option.
get Gets the value for an option.
get-autoload Gets the 'autoload' value for an option.
list Lists options and their values.
patch Updates a nested value in an option.
pluck Gets a nested value from an option.
set-autoload Sets the 'autoload' value for an option.
update Updates an option value.
```
Liste des options modifiables:
```bash
$ wp option list
```

View File

@@ -91,3 +91,7 @@ Success: 3 replacements to be made.
Pour chercher dans toutes les tables (également celles des plug-ins): `--all-tables`
Pour simuler: `--dry-run`
Pour créer un dump des modifications: `--export=/tmp/staging.sql`
https://www.it-connect.fr/wordpress-et-wp-cli-rechercher-et-remplacer-dans-la-base-de-donnees/
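Putting the options together (hypothetical URLs): first a preview, then a dump instead of a live replacement:
```bash
# Dry run across all tables (including plugin tables)
wp search-replace 'http://example.com' 'https://example.com' --all-tables --dry-run
# Write the transformed data to a dump instead of touching the database
wp search-replace 'http://example.com' 'https://example.com' --all-tables --export=/tmp/staging.sql
```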

View File

@@ -363,6 +363,28 @@ export PATH="$HOME/Documents/venv/soco-cli/bin:$PATH"
#### zsh-fzf-history-search
[zsh-fzf-history-search.zsh](https://github.com/joshskidmore/zsh-fzf-history-search/blob/master/zsh-fzf-history-search.zsh)
```bash
# zsh-fzf-history-search
zinit ice lucid wait'0'
zinit light joshskidmore/zsh-fzf-history-search
```
#### fzf-zsh-plugin
https://github.com/unixorn/fzf-zsh-plugin
```bash
zinit load unixorn/fzf-zsh-plugin
```
#### web-search *
Lance une recherche depuis le terminal.

View File

@@ -0,0 +1,143 @@
# Zsh sans plugin manager ni OMZ
https://dev.to/hbenvenutti/using-zsh-without-omz-4gch
Au début du fichier .zshrc, insérer:
```bash
### ZSH HOME
export ZSH=$HOME/.zsh
autoload -Uz compinit
compinit
```
On insère les plugins à la fin du fichier .zshrc
```bash
# https://github.com/ikhurramraza/bol
~/.zsh/plugins
➜ git clone https://github.com/ikhurramraza/bol.git
# Inserer les citations dans /Users/bruno/.zsh/plugins/bol/quotes
# Ajouter au .zshrc
source $ZSH/plugins/bol/bol.plugin.zsh
```
```bash
# https://github.com/kazhala/dotbare
~/.zsh/plugins
➜ git clone https://github.com/kazhala/dotbare.git
# Ajouter au .zshrc
source $ZSH/plugins/dotbare/dotbare.plugin.zsh
```
```bash
# https://github.com/MichaelAquilina/zsh-you-should-use
~/.zsh/plugins
➜ git clone https://github.com/MichaelAquilina/zsh-you-should-use.git
# Ajouter au .zshrc
source $ZSH/plugins/zsh-you-should-use/you-should-use.plugin.zsh
```
```bash
# https://github.com/ohmyzsh/ohmyzsh/tree/master/plugins/history-substring-search
# https://github.com/zsh-users/zsh-history-substring-search
~/.zsh/plugins
➜ git clone https://github.com/zsh-users/zsh-history-substring-search
# Ajouter au .zshrc
source $ZSH/plugins/zsh-history-substring-search/zsh-history-substring-search.plugin.zsh
```
```bash
# https://github.com/zsh-users/zsh-autosuggestions
~/.zsh/plugins
➜ git clone https://github.com/zsh-users/zsh-autosuggestions.git
# Ajouter au .zshrc
source $ZSH/plugins/zsh-autosuggestions/zsh-autosuggestions.plugin.zsh
```
.zshrc
```bash
source $ZSH/plugins/bol/bol.plugin.zsh
source $ZSH/plugins/zsh-autosuggestions/zsh-autosuggestions.zsh
source $ZSH/plugins/zsh-history-substring-search/zsh-history-substring-search.plugin.zsh
fpath=($ZSH/plugins/zsh-completions/src $fpath)
# rm -f ~/.zcompdump; compinit
source $ZSH/plugins/fast-syntax-highlighting/fast-syntax-highlighting.plugin.zsh
```
https://github.com/ohmyzsh/ohmyzsh/tree/master/plugins/bbedit
https://github.com/ohmyzsh/ohmyzsh/tree/master/plugins/chezmoi
https://www.chezmoi.io/#what-does-chezmoi-do
https://github.com/ohmyzsh/ohmyzsh/tree/master/plugins/colored-man-pages
https://github.com/ohmyzsh/ohmyzsh/tree/master/plugins/colorize
https://github.com/ohmyzsh/ohmyzsh/tree/master/plugins/dotenv
https://github.com/ohmyzsh/ohmyzsh/tree/master/plugins/keychain
Coloration syntaxique dans nano:
```bash
curl https://raw.githubusercontent.com/scopatz/nanorc/master/install.sh | sh
```
```bash
~
cd
drwxr-xr-x@ - bruno staff 21 mar 21:01 21 mar 21:01  .nano
.rw-r--r--@ 2,7Ki bruno staff 21 mar 21:01 21 mar 21:01  .nanorc
```
.dircolors:
https://github.com/seebi/dircolors-solarized
Préférences:
| Outil     | Fichier de configuration      |
| --------- | ----------------------------- |
| Multitail | /usr/local/etc/multitail.conf |

View File

@@ -90,7 +90,39 @@ fi
### Conditions:
#### -fichier
#### -variables:
Si la variable est déclarée (même vide) : `-v`. Le test prend le **nom** de la variable, sans `$`.
```bash
API_KEY=
if [ -v API_KEY ]; then echo "La variable API_KEY existe"; fi
La variable API_KEY existe
# Attention : ne pas écrire [ -v $API_KEY ] : avec API_KEY vide, cela devient [ -v ],
# qui est toujours vrai, que la variable existe ou non.
```
```bash
# Déclarée => vide
if [ -n "$API_KEY" ]; then echo "La variable API_KEY n'est pas vide"; else echo "La variable API_KEY est vide"; fi
La variable API_KEY est vide
# Non déclarée => vide
if [ -n "$API_KEY2" ]; then echo "La variable API_KEY2 n'est pas vide"; else echo "La variable API_KEY2 est vide"; fi
La variable API_KEY2 est vide
```
```bash
# Non déclarée => vide
if [ -z "$API_KEY3" ]; then echo "La variable API_KEY3 est vide"; else echo "La variable API_KEY3 n'est pas vide"; fi
La variable API_KEY3 est vide
```
#### -dossiers:
Si le répertoire *<u>directory</u>* existe
@@ -110,6 +142,8 @@ if find "$local_path/node_modules" -mindepth 1 -maxdepth 1 | read; then echo "di
if [ -d "$local_path/node_modules" ] && [ -n "$(ls -A "$local_path/node_modules")" ]; then echo "dir not empty"; else echo "dir empty"; fi
```
#### -fichiers:
Si le fichier *<u>regularfile</u> (ni un blockspecialfile, ni un characterspecialfile, ni un directory)* existe
```bash

View File

@@ -14,7 +14,7 @@
------------- Minute (0 - 59)
```
https://crontab.guru
@@ -48,3 +48,32 @@ Le fichier crontab s'ouvre dans l'éditeur spécifié par la variable d'environn
https://linuxize.com/post/scheduling-cron-jobs-with-crontab/
#### Voir si le cron est exécuté:
On ouvre la crontab avec `# crontab -e`:
```bash
# On redirige la sortie et les erreurs du script vers les logs avec 2>&1 | logger -t up_pihole
# Ici le cron est exécuté toutes les 2 minutes.
*/2 * * * * bash /root/update_pihole.sh 2>&1 | logger -t up_pihole
```
On cherche les sorties du cron dans les logs:
```bash
# journalctl -f | grep 'up_pihole\|cron'
```
```bash
Aug 25 16:40:01 PiHole1 CRON[438003]: (root) CMD (/usr/bin/env > /root/cron-env)
Aug 25 16:40:01 PiHole1 CRON[438004]: (root) CMD (bash /root/update_pihole.sh 2>&1 | logger -t up_pihole)
Aug 25 16:40:01 PiHole1 CRON[438001]: pam_unix(cron:session): session closed for user root
Aug 25 16:40:02 PiHole1 up_pihole[438007]: Find Pi-hole update on PiHole1
Aug 25 16:40:02 PiHole1 up_pihole[438007]:
Aug 25 16:40:02 PiHole1 up_pihole[438007]: Pi-hole Current Last
Aug 25 16:40:02 PiHole1 up_pihole[438007]: Pi-hole v5.18.3 v5.18.3
```

View File

@@ -46,6 +46,30 @@ find /Users/bruno/Desktop/Juin -type f | xargs -L 1 bash -c '/opt/homebrew/opt/c
#### Rendre curl silencieux:
##### Masquer erreurs et barre de progression:
```bash
curl -s https://google.com
```
##### Complètement silencieux:
```bash
curl -s -o /dev/null https://google.com
```
##### Afficher juste les erreurs:
```bash
curl -S -s -o /dev/null https://google.com
```
#### Options:
```
-Z, --parallel
-#, --progress-bar

View File

@@ -8,3 +8,58 @@
```bash
# -d (delimiter) :
# -f (field) 1
$ cut -d':' -f1 /etc/passwd
nobody
root
daemon
_uucp
_taskgated
_networkd
cut -d':' -f1-3,5,6 /etc/passwd
```
```bash
# Tout sauf 7e champ
$ cut -d':' -f7 --complement /etc/passwd
```
```bash
# Remplace le séparateur ':' par ' '
$ cut -d':' -f7 --complement /etc/passwd --output-delimiter=' '
```
```bash
# 5e caractère
$ echo 'cut command' | cut -b 5
c
# 5 au 7e caractères
$ echo 'cut command' | cut -b 5-7
com
# 5 et 7e caractères
$ echo 'cut command' | cut -b 5,7
cm
# Du 5e à la fin
$ echo 'cut command' | cut -b 5-
command
# Du début au 5e
$ echo 'cut command' | cut -b -5
cut c
```

View File

@@ -32,6 +32,8 @@ pip/pip.conf
##### Recherche dans un répertoire particulier:
```bash
# Fichiers cachés (-H) dans le dossier .ssh
$ fd -HI 'id_*' .ssh
.ssh/id_ed25519
.ssh/id_ed25519.pub
@@ -72,6 +74,21 @@ $ fd -HI '.*[0-9]\.jpg$' ~
$ find ~ -iname '*[0-9].jpg'
```
##### Rechercher une extension:
```bash
# Rechercher les scripts bash (.sh) dans le répertoire courant
$ fd -e sh .
convert-videos-for-plex.sh
handbrake_for_plex.sh
keywords2insta.sh
macho.sh
```
##### Sans arguments:
```bash
@@ -96,6 +113,14 @@ $ fd -I -g php.ini /opt
/opt/homebrew/etc/php/8.0/php.ini
```
##### Rechercher plusieurs patterns:
```bash
$ fd -H ".env|docker-compose.yml"
.env
docker-compose.yml
```
#### Option:

View File

@@ -213,6 +213,20 @@ $ find /volume1/@appstore/PHP7.4/etc ! -perm 644
/volume1/@appstore/PHP7.4/etc/php/conf.d
```
Rechercher les fichiers avec permission 644 et les afficher avec ls:
```bash
$ find -maxdepth 1 -type f -perm -644 -ls
920871 148 -rw-r--r-- 1 sentier psacln 148546 Nov 14 16:37 ./12_2008.jpg
920718 276 -rw-r--r-- 1 sentier psacln 278540 Nov 14 16:37 ./7_2017.jpg
920675 120 -rw-r--r-- 1 sentier psacln 120837 Nov 14 16:37 ./5_2020.jpg
$ find -maxdepth 1 -type f -perm -644 -exec ls -la {} \;
-rw-r--r-- 1 sentier psacln 89608 Nov 14 16:37 ./11_2018.jpg
-rw-r--r-- 1 sentier psacln 258835 Nov 14 16:37 ./9_2007.jpg
-rw-r--r-- 1 sentier psacln 343441 Nov 14 16:37 ./7_2005.jpg
```
Rechercher les fichiers avec permission 777 et les modifier en 644:
```bash
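# Sketch (not in the original notes): chmod files found with 777 down to 644
find . -type f -perm 777 -exec chmod 644 {} \;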

View File

@@ -228,6 +228,10 @@ https://reposhub.com/linux/shell-applications/lincheney-fzf-tab-completion.html
### Python
#### Activer un venv:
```bash
function activate-venv() {
local selected_env
@@ -239,3 +243,184 @@ function activate-venv() {
}
```
### Git
#### Git commit history
```bash
git log --oneline | fzf --preview 'git show --name-only {1}'
```
### Navigateurs
#### Recherche dans l'historique de Firefox:
```bash
cd ~/Library/Application\ Support/Firefox/Profiles/*.default-release
sqlite3 places.sqlite "SELECT url FROM moz_places" | fzf
```
#### Recherche dans les bookmarks de chrome:
```bash
b() {
bookmarks_path=~/Library/Application\ Support/Google/Chrome/Default/Bookmarks
jq_script='
def ancestors: while(. | length >= 2; del(.[-1,-2]));
. as $in | paths(.url?) as $key | $in | getpath($key) | {name,url, path: [$key[0:-2] | ancestors as $a | $in | getpath($a) | .name?] | reverse | join("/") } | .path + "/" + .name + "\t" + .url'
jq -r "$jq_script" < "$bookmarks_path" \
| sed -E $'s/(.*)\t(.*)/\\1\t\x1b[36m\\2\x1b[m/g' \
| fzf --ansi \
| cut -d$'\t' -f2 \
| xargs open
}
```
#### Recherche dans l'historique de Safari:
```bash
function sbh() {
local cols sep
cols=$(( COLUMNS / 3 ))
sep='{::}'
cp -f ~/Library/Safari/History.db /tmp/h
sqlite3 -separator $sep /tmp/h \
"select substr(id, 1, $cols), url
from history_items order by visit_count_score desc" |
awk -F $sep '{printf "%-'$cols's \x1b[36m%s\x1b[m\n", $1, $2}' |
fzf --ansi --multi | sed 's#.*\(https*://\)#\1#' | xargs open
}
fzf-safari-browser-history()
{
local cols sep
columns=$(( COLUMNS / 3 ))
separator='{::}'
sqlite3 -separator $separator $HOME/Library/Safari/History.db \
"select distinct substr(title, 1, $columns), url from history_items
inner join history_visits on history_items.id = history_visits.history_item
order by history_visits.visit_time desc;" |
awk -F $separator '{printf "%-'$columns's \x1b[36m%s\x1b[m\n", $1, $2}' |
fzf --ansi --multi | sed 's#.*\(https*://\)#\1#' | xargs open -a safari
}
```
### Terminal
#### Kill process:
```bash
kill -9 $(ps aux | fzf | awk '{print $2}')
```
#### File preview
```bash
fzf --preview 'bat --style=numbers --color=always --line-range :500 {}'
```
```bash
fd . '/opt/homebrew' | fzf --height=90% --reverse --preview 'cat {}' --query '_log'
```
### Docker
```bash
# Select a docker container to start and attach to
function da() {
local cid
cid=$(docker ps -a | sed 1d | fzf -1 -q "$1" | awk '{print $1}')
[ -n "$cid" ] && docker start "$cid" && docker attach "$cid"
}
```
```bash
# Select a running docker container to stop
function ds() {
local cid
cid=$(docker ps | sed 1d | fzf -q "$1" | awk '{print $1}')
[ -n "$cid" ] && docker stop "$cid"
}
```
```bash
# Select a docker container to remove
function drm() {
local cid
cid=$(docker ps -a | sed 1d | fzf -q "$1" | awk '{print $1}')
[ -n "$cid" ] && docker rm "$cid"
}
# Same as above, but allows multi selection:
function drm() {
docker ps -a | sed 1d | fzf -q "$1" --no-sort -m --tac | awk '{ print $1 }' | xargs -r docker rm
}
```
```bash
# Select a docker image or images to remove
function drmi() {
docker images | sed 1d | fzf -q "$1" --no-sort -m --tac | awk '{ print $3 }' | xargs -r docker rmi
}
```
### Homebrew Cask
```bash
# Install or open the webpage for the selected application
# using brew cask search as input source
# and display a info quickview window for the currently marked application
install() {
local token
token=$(brew search --casks "$1" | fzf-tmux --query="$1" +m --preview 'brew info {}')
if [ "x$token" != "x" ]
then
echo "(I)nstall or open the (h)omepage of $token"
read input
if [ $input = "i" ] || [ $input = "I" ]; then
brew install --cask $token
fi
if [ $input = "h" ] || [ $input = "H" ]; then
brew home $token
fi
fi
}
```
#### fzf-brew
```bash
antigen bundle thirteen37/fzf-brew
# Commandes fournies par le plugin :
#   fbi  : Fuzzy brew install
#   fbui : Fuzzy brew uninstall
#   fci  : Fuzzy cask install
#   fcui : Fuzzy cask uninstall
```
https://github.com/thirteen37/fzf-brew?tab=readme-ov-file

View File

@@ -155,3 +155,9 @@ Afficher les 5 lignes qui suivent le motif recherché:
grep 'toto' -A5 fichier.txt
```
Afficher les 5 lignes qui précèdent le motif recherché:
```bash
grep 'toto' -B5 fichier.txt
```

View File

@@ -26,10 +26,21 @@ hello.txt link
```bash
~ ln -s link/hello.txt hello_you.txt
~ ls
hello.txt link hello_you.txt
```
```bash
~/.local/bin
$ ln -s ~/Documents/Scripts/pihole/sync_pihole_lan.sh sync_pihole
```
```bash
~/.local/bin
$ ln -s ~/Documents/Scripts/bashbirds/bashbirds.sh bashbird
```
### hard link:

15
docs/Linux/rsync.md Normal file
View File

@@ -0,0 +1,15 @@
# rsync
### Copier un fichier sur le serveur:
```bash
rsync -e "ssh" -avz file.sh root@192.168.2.116:/root
# si le port ssh n'est pas le port 22 par défaut
rsync -e "ssh -p 51322" -avz file.sh root@192.168.2.116:/root
```
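The same options work in the other direction to pull files from the server (same host and port reused for illustration):
```bash
# Copy a single file from the server into the current directory
rsync -e "ssh -p 51322" -avz root@192.168.2.116:/root/file.sh .
# Copy a directory's contents (trailing slash = contents only)
rsync -e "ssh -p 51322" -avz root@192.168.2.116:/root/scripts/ ./scripts/
```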

View File

@@ -136,3 +136,8 @@ PING 192.168.1.8 (192.168.1.8): 56 data bytes
bck-i-search: nas_
```
### Interactive shell
https://unix.stackexchange.com/questions/46789/check-if-script-is-started-by-cron-rather-than-invoked-manually

View File

@@ -28,12 +28,24 @@ bruno@macbook-pro:~$ sudo chmod 755 ~/.ssh
#### Copier un fichier:
#### Copier un fichier depuis le serveur:
```bash
macbook-pro:~ bruno$ ssh root@192.168.1.8 "cat prefs.tar.gz"> prefs.tar.gz
```
```bash
$ ssh pihole1 "cat update_pihole.sh"> update_pihole_pi1.sh
$ ssh pihole2 "cat update_pihole.sh"> update_pihole_pi2.sh
```
#### Envoyer un fichier sur le serveur:
```bash
$ cat update_pihole_pi1.sh | ssh pihole1 'cat > update_pihole.sh'
$ cat update_pihole_pi2.sh | ssh pihole2 'cat > update_pihole.sh'
```
#### Exécuter une (ou plusieurs) commande sur un serveur distant:
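A minimal sketch (reusing the pihole1 host alias from above):
```bash
# One command
ssh pihole1 'pihole -v'
# Several commands chained in a single quoted string
ssh pihole1 'uname -a && df -h /'
```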

View File

@@ -36,3 +36,119 @@ Outils et Paramètres -> **Apparence de Plesk** -> Langues
https://romantic-nightingale.212-227-191-167.plesk.page:8447/select_components.html
### zstd: error 25 : Write error : No space left on device (cannot write compressed block)
```bash
Setting up initramfs-tools (0.140ubuntu13.4) ...
update-initramfs: deferring update (trigger activated)
Processing triggers for initramfs-tools (0.140ubuntu13.4) ...
update-initramfs: Generating /boot/initrd.img-5.15.0-119-generic
zstd: error 25 : Write error : No space left on device (cannot write compressed block)
E: mkinitramfs failure zstd -q -1 -T0 25
update-initramfs: failed for /boot/initrd.img-5.15.0-119-generic with 1.
dpkg: error processing package initramfs-tools (--configure):
installed initramfs-tools package post-installation script subprocess returned error exit status 1
Errors were encountered while processing:
initramfs-tools
E: Sub-process /usr/bin/dpkg returned an error code (1)
```
[Safely Removing Old Kernels](https://help.ubuntu.com/community/RemoveOldKernels)
https://github.com/rubo77/remove-old-kernels
La partition /boot est pleine:
```bash
$ df
Filesystem 1K-blocks Used Available Use% Mounted on
tmpfs 396900 1576 395324 1% /run
/dev/mapper/vg00-lv01 121147748 23482928 92641976 21% /
tmpfs 1984492 4 1984488 1% /dev/shm
tmpfs 5120 20 5100 1% /run/lock
/dev/vda1 498900 385456 76748 84% /boot
overlay 121147748 23482928 92641976 21% /var/lib/docker/overlay2/c3f6a94615fd3d71bf9a30c4270abda191b82d740ec423366e15aff59954215b/merged
overlay 121147748 23482928 92641976 21% /var/lib/docker/overlay2/32937ff042ce2d6af0ace187f7a06019f273c7c47b9e0a3d43bd08bd910d7bfb/merged
overlay 121147748 23482928 92641976 21% /var/lib/docker/overlay2/c3a657579b376ee7079682eb1668115a001aef79fcd5e6e48f14786e0187023f/merged
overlay 121147748 23482928 92641976 21% /var/lib/docker/overlay2/64e527239f42c7255578257a7830f97b3df5e9ffba7cc14fcf1f51556573dbcb/merged
overlay 121147748 23482928 92641976 21% /var/lib/docker/overlay2/1a8ee224695743c7a8b7e0c2aae7d71f76789fd09583513190196f52ebb6599a/merged
tmpfs 396896 8 396888 1% /run/user/10001
```
#### Kernel courant:
```bash
$ uname -r
5.15.0-119-generic
```
#### Liste des Kernels
```bash
$ dpkg -l | tail -n +6 | grep -E 'linux-image-[0-9]+'
rc linux-image-4.2.0-14-generic ## The oldest kernel in the database
## Status 'rc' means it's already been removed
ii linux-image-4.2.0-15-generic ## The oldest installed kernel. Removable.
## Status 'ii' means Installed.
ii linux-image-4.2.0-16-generic ## Another old installed kernel. Removable.
ii linux-image-4.2.0-18-generic ## Another old installed kernel. Removable.
ii linux-image-4.2.0-19-generic ## The previous good kernel. Keep.
ii linux-image-4.2.0-21-generic ## Same version as 'uname -r', package for the current
## kernel. DO NOT REMOVE.
iU linux-image-4.2.0-22-generic ## DO NOT REMOVE. Status 'iU' means it's not installed,
## but queued for install in apt.
## This is the package we want apt to install.
```
```bash
$ sudo update-initramfs -d -k 4.2.0-15-generic
```
```bash
$ sudo dpkg --purge linux-image-4.2.0-15-generic
```
Si erreur:
```bash
$ sudo dpkg --purge linux-image-4.2.0-15-generic linux-image-extra-4.2.0-15-generic
```
#### Maintenance
```bash
$ sudo apt-get autoremove --purge
```
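To preview what would be purged without touching anything, apt-get accepts a simulation flag:
```bash
$ sudo apt-get autoremove --purge --dry-run
```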
### apt update
##### 4 packages can be upgraded. Run 'apt list --upgradable' to see them.
```bash
root@eloquent-benz:/etc/apt/keyrings# apt update
.../...
Reading package lists... Done
Building dependency tree... Done
Reading state information... Done
4 packages can be upgraded. Run 'apt list --upgradable' to see them.
```
```bash
root@eloquent-benz:/etc/apt/keyrings# apt list --upgradable
Listing... Done
python3-update-manager/jammy-updates,jammy-updates 1:22.04.20 all [upgradable from: 1:22.04.19]
ubuntu-advantage-tools/jammy-updates,jammy-updates 32.3.1~22.04 amd64 [upgradable from: 30~22.04]
ubuntu-pro-client-l10n/jammy-updates 32.3.1~22.04 amd64 [upgradable from: 30~22.04]
update-manager-core/jammy-updates,jammy-updates 1:22.04.20 all [upgradable from: 1:22.04.19]
```
```bash
root@eloquent-benz:/etc/apt/keyrings# apt-get install --only-upgrade python3-update-manager
```

View File

@@ -18,6 +18,35 @@ Certificat wildcard Let's Encrypt
```
Installer node.js mais ne pas l'activer. (sinon erreur Passenger pour WordPress)
#### Test certificat:
https://www.digicert.com/help/
https://tools.letsdebug.net
Autres outils:
https://ssl-config.mozilla.org/
https://observatory.mozilla.org/
#### Installer bat - fd
```bash
apt install fzf bat fd-find
```
```bash
mkdir -p ~/.local/bin
ln -s /usr/bin/batcat ~/.local/bin/bat
ln -s $(which fdfind) ~/.local/bin/fd
```
#### Commande plesk:

View File

@@ -2,6 +2,22 @@
#### Installer et activer l'extension Docker dans Plesk
#### Installer docker-compose
```bash
# curl -L "https://github.com/docker/compose/releases/download/v2.23.3/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
# chmod +x /usr/local/bin/docker-compose
```
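A quick sanity check that the binary is on the PATH and executable:
```bash
docker-compose version
```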
https://docs.docker.com/compose/
### Serveur Joplin:
https://github.com/laurent22/joplin/blob/dev/packages/server/README.md
@@ -24,7 +40,7 @@ version: '3'
services:
db:
image: postgres:13.1
image: postgres:16
ports:
- "5432:5432"
restart: unless-stopped
@@ -36,7 +52,7 @@ services:
- POSTGRES_USER=joplin
- POSTGRES_DB=joplin
app:
image: joplin/server:2.4.1-beta
image: joplin/server:latest
depends_on:
- db
ports:
@@ -76,11 +92,31 @@ Créer un sous-domaine **joplin.maboiteverte.fr**
Créer une <u>règle de proxy Docker</u>:
- URL: Joplin.maboiteverte.fr/
- Conteneur: Joplin_app_1
- Conteneur: joplin_app_1
- Port: 22300 -> 22300
#### Premier lancement:
Aller sur joplin.maboiteverte.fr. Se connecter avec:
- admin@localhost
- admin
puis changer le mot-de-passe. Créer un 2nd utilisateur.
Si la synchro se passe bien, mais qu'il n'y a rien sur le serveur:
- dans le client, exporter un fichier .JEX puis créer un nouveau profil.
- dans le nouveau profil, importer le fichier .JEX
- dans les réglages, re-paramétrer la synchronisation.
- re-lancer la synchronisation.
#### Voir les logs:
```bash
@@ -89,6 +125,10 @@ Password:
Attaching to joplin_app_1, joplin_db_1
app_1 | WARNING: no logs are available with the 'db' log driver
db_1 | WARNING: no logs are available with the 'db' log driver
# Au fil de l'eau...
$ sudo docker container ls
$ sudo docker logs --follow 05850da6082e
```

View File

@@ -217,3 +217,16 @@ Array
)
```
#### Bash
```bash
# ~/Sites/sls/a
sqlite3 contact8.sqlite "SELECT * FROM Details"
1|Rebbecca|Didio|03-8174-9123|rebbecca.didio@didio.com.au
2|Stevie|Hallo|07-9997-3366|stevie.hallo@hotmail.com
3|Mariko|Stayer|08-5558-9019|mariko_stayer@hotmail.com
```

View File

@@ -2,6 +2,8 @@
##### Sous Linux:
```bash
bruno@debian:~$ sudo fdisk -l
@@ -18,8 +20,6 @@ Périphérique Amorçage Début Fin Secteurs Taille Id Type
/dev/sdg2 98304 62333951 62235648 29,7G 83 Linux
```
```bash
bruno@debian:~$ sudo dd if=/dev/sdg of=~/raspian_backup.img status=progress
62333952+0 enregistrements lus
@@ -31,6 +31,26 @@ bruno@debian:~$ sudo dd if=/dev/sdg of=~/raspian_backup.img status=progress
# status=progress sinon dd n'affiche rien
```
##### Sur Mac:
```bash
$ diskutil list
/dev/disk6 (external, physical):
#: TYPE NAME SIZE IDENTIFIER
0: FDisk_partition_scheme *8.0 GB disk6
1: Windows_FAT_32 NO NAME 134.2 MB disk6s1
2: Linux 7.9 GB disk6s2
```
```bash
$ sudo dd if=/dev/disk6 of=/Volumes/Sophie/RaspberryPi/DietPi_RPi1-ARMv6.dmg
15712256+0 records in
15712256+0 records out
8044675072 bytes (8,0 GB, 7,5 GiB) copied, 143,074 s, 56,2 MB/s
```
Avec dd, l'image a la même taille que le disque, quelle que soit la taille réellement occupée.
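To avoid storing all that empty space, the dd stream can be compressed on the fly (sketch, same disk identifier as above):
```bash
# Backup: compress while reading the SD card
sudo dd if=/dev/disk6 bs=1m | gzip > DietPi_RPi1-ARMv6.img.gz
# Restore: decompress straight back onto the card
gunzip -c DietPi_RPi1-ARMv6.img.gz | sudo dd of=/dev/disk6 bs=1m
```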
@@ -84,3 +104,15 @@ $ sudo dd if=~/raspian_backup.img of=/dev/sdg
Utiliser Win32DiskImager : sourceforge.net/projects/win32diskimager
# Sous macOS
ApplePi-Baker permet de sauvegarder une carte SD en fichier image, et surtout de shrinker l'image (contracter une SD de 32 Go en l'espace réellement utilisé)
[ApplePi-Baker v2](https://www.tweaking4all.com/software/macosx-software/applepi-baker-v2/)
```bash
brew install applepi-baker
```

176
docs/Raspberry/diet-pi.md Normal file
View File

@@ -0,0 +1,176 @@
# Diet-pi
### Dropbear
```bash
systemctl status dropbear.service
```
Fichier de configuration: `/etc/default/dropbear`
```bash
# The TCP port that Dropbear listens on
DROPBEAR_PORT=51322
```
```bash
systemctl status dropbear.service
● dropbear.service - Lightweight SSH server
Loaded: loaded (/lib/systemd/system/dropbear.service; enabled; preset: enabled)
Active: active (running) since Sat 2024-02-10 09:07:47 GMT; 7s ago
Docs: man:dropbear(8)
Main PID: 3107 (dropbear)
Tasks: 5 (limit: 1069)
CPU: 68ms
CGroup: /system.slice/dropbear.service
├─3060 /usr/sbin/dropbear -EF -p 22 -W 65536 -2 8
├─3061 -bash
├─3107 /usr/sbin/dropbear -EF -p 51322 -W 65536
├─3109 systemctl status dropbear.service
└─3110 "(pager)"
```
```bash
# disallow root login
DROPBEAR_EXTRA_ARGS="-w -g"
```
Editer et redémarrer Dropbear:
```bash
root@PiHole1:~# nano /etc/default/dropbear
root@PiHole1:~# systemctl restart dropbear.service
```
#### --help
```bash
Dropbear server v2022.83 https://matt.ucc.asn.au/dropbear/dropbear.html
Usage: dropbear [options]
-b bannerfile Display the contents of bannerfile before user login
(default: none)
-r keyfile Specify hostkeys (repeatable)
defaults:
- dss /etc/dropbear/dropbear_dss_host_key
- rsa /etc/dropbear/dropbear_rsa_host_key
- ecdsa /etc/dropbear/dropbear_ecdsa_host_key
- ed25519 /etc/dropbear/dropbear_ed25519_host_key
-R Create hostkeys as required
-F Don't fork into background
-e Pass on server process environment to child process
-E Log to stderr rather than syslog
-m Don't display the motd on login
-w Disallow root logins
-G Restrict logins to members of specified group
-s Disable password logins
-g Disable password logins for root
-B Allow blank password logins
-t Enable two-factor authentication (both password and public key required)
-T Maximum authentication tries (default 10)
-j Disable local port forwarding
-k Disable remote port forwarding
-a Allow connections to forwarded ports from any host
-c command Force executed command
-p [address:]port
Listen on specified tcp port (and optionally address),
up to 10 can be specified
(default port is 22 if none specified)
-P PidFile Create pid file PidFile
(default /var/run/dropbear.pid)
-i Start for inetd
-W <receive_window_buffer> (default 24576, larger may be faster, max 10MB)
-K <keepalive> (0 is never, default 0, in seconds)
-I <idle_timeout> (0 is never, default 0, in seconds)
-z disable QoS
-V Version
```
#### Passwordless:
```bash
cat ~/.ssh/id_rsa.pub | ssh -p65535 root@192.168.12.116 'cat>> ~/.ssh/authorized_keys'
```
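The same result with ssh-copy-id when it is available on the client (Dropbear reads the usual ~/.ssh/authorized_keys):
```bash
# Appends the public key to ~/.ssh/authorized_keys on the target
ssh-copy-id -p 65535 root@192.168.12.116
```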
### Log
```bash
# journalctl --no-pager | grep 'cron'
```
| Command | Remark |
| :-------------------------------------------- | :----------------------------------------------------------- |
| `journalctl -u UNITNAME` (`--unit UNITNAME`) | Displays messages of the given unit |
| `journalctl _PID=<process_id>` | Displays messages of process with PID equals to <process_id> |
| `journalctl -r` (`--reverse`) | Displays list in reverse order, i.e. newest messages first |
| `journalctl -f` (`--follow`) | Displays the tail of the log message list and shows new entries *live* |
| `journalctl -b` (`--boot`) | Displays messages since the last boot (i.e. no older messages). See also option `--list-boots` |
| `journalctl -k` (`--dmesg`) | Displays kernel messages |
| `journalctl -p PRIORITY` (`--priority PRIORITY`) | Displays messages with the given priority. PRIORITY may be `emerg`, `alert`, `crit`, `err`, `warning`, `notice`, `info` and `debug`. Also numbers as PRIORITY are possible |
| `journalctl -o verbose` | Displays additional meta data |
| `journalctl --disk-usage` | Displays the amount of disk space used by the logging messages |
| `journalctl --no-pager \| grep <filter>` | Filters log messages (filtering with `grep`) |
#### Quand sont exécutés les scripts cron.hourly, cron.daily, cron.monthly...
```bash
grep run-parts /etc/crontab
#*/0 * * * * root cd / && run-parts --report /etc/cron.minutely
17 * * * * root cd / && run-parts --report /etc/cron.hourly
25 1 * * * root test -x /usr/sbin/anacron || { cd / && run-parts --report /etc/cron.daily; }
47 1 * * 7 root test -x /usr/sbin/anacron || { cd / && run-parts --report /etc/cron.weekly; }
52 1 1 * * root test -x /usr/sbin/anacron || { cd / && run-parts --report /etc/cron.monthly; }
```
#### Exécuter les scripts cron.hourly, cron.daily, cron.monthly...
```bash
run-parts --test /etc/cron.hourly/
```
#### Ajouter un script à cron.hourly
```bash
root@PiHole1:/etc/cron.hourly# l
total 16
-rw-r--r-- 1 root root 102 Mar 2 2023 .placeholder
-rwxr-xr-x 1 root root 1311 Aug 27 19:49 dietpi
-rwxr-xr-x 1 root root 191 Feb 22 2012 fake-hwclock
-rwxr-xr-x 1 root root 60 Sep 5 09:47 pihole
```
```bash
nano /etc/cron.hourly/pihole
#!/bin/bash
#Look for pihole update
/root/update_pihole.sh
```
```bash
*/2 * * * * # toutes les 2 minutes
* */2 * * * # toutes les 2 heures
0 1 * * 1,3,5 # At 01:00 on Monday, Wednesday, and Friday
```

View File

@@ -6,23 +6,48 @@
[aptitude](aptitude.md)
[Argon One](Argon-one.md)
[Backup](backup.md)
[Boot et clone](boot.md)
[Cloud](cloud.md)
[Diet-pi](diet-pi.md)
[Envoyer un mail depuis le Raspberry](send_mail.md)
[Hardware](hardware.md)
[Heure](heure.md)
[Installation sans écran](headless.md)
[Mail](mail.md)
[Matériels](materiels/materiels.md)
[Nextcloud](nextcloud.md)
[Pi Desktop](pi-desktop.md)
[Pi-hole](pi-hole.md)
[Python](python.md)
[Rclone](rclone.md)
[Réseau](reseau.md)
[Services](services.md)
[SiriControl](siri_control.md)
[Tools](tools.md)
[Divers](divers.md)

217
docs/Raspberry/mail.md Normal file
View File

@@ -0,0 +1,217 @@
### Envoyer un mail depuis le Raspberry
Il faut installer **msmtp**:
```bash
# apt install bsd-mailx msmtp msmtp-mta
```
#### Configuration:
https://doc.ubuntu-fr.org/tutoriel/comment_envoyer_un_mail_par_smtp_en_ligne_de_commande
https://wiki.archlinux.org/title/Msmtp
http://www.futurile.net/resources/msmtp-a-simple-mail-transfer-agent/
```bash
nano /etc/msmtprc
```
```bash
#Set default values for all accounts.
defaults
auth login
tls on
tls_starttls off
tls_certcheck on
tls_trust_file /etc/ssl/certs/ca-certificates.crt
logfile /var/log/msmtp.log
#OVH settings
account ovh
host ssl0.ovh.net
#auth login
#tls on
#tls_starttls off
#tls_certcheck on
#tls_trust_file /etc/ssl/certs/ca-certificates.crt
from xxxxxxxxxxx@clicclac.info
port 465
user xxxxxxxxxxx@clicclac.info
password xxxxxxxxxxx
#Orange settings
account orange
host smtp.orange.fr
protocol smtp
#auth login
#tls on
#tls_starttls off
#tls_certcheck on
#tls_trust_file /etc/ssl/certs/ca-certificates.crt
from byyyyyyyyyyyyy@orange.fr
maildomain orange.fr
port 465
user byyyyyyyyyyyyy
password yyyyyyyyyyyyy
#Set a default account
account default : orange
```
on sécurise le fichier de config:
```bash
root@PiHole2:~# chown root:msmtp /etc/msmtprc
root@PiHole2:~# chmod 640 /etc/msmtprc
```
Encrypter `/etc/msmtprc`
https://www.howtoraspberry.com/2021/06/how-to-send-mail-from-a-raspberry-pi/
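An alternative to encrypting the whole file: store only the SMTP password GPG-encrypted and let msmtp decrypt it via `passwordeval` (sketch, hypothetical file path, recipient = the key generated further below):
```bash
# Encrypt just the password for the pihole key
echo -n 'xxxxxxxxxxx' | gpg --encrypt --recipient liste@clicclac.info -o /root/.msmtp-password.gpg
# Then, in /etc/msmtprc, replace the "password" line with:
#   passwordeval gpg --quiet --no-tty --decrypt /root/.msmtp-password.gpg
```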
#### Test:
```bash
root@PiHole2:~# echo "BEEP BEEP" | mailx -s "Subject: This is a test!" bxxxxxxxxxxx@orange.fr
```
nano test.mail
```bash
To: bxxxxxxxxxxx@orange.fr
From: bxxxxxxxxxxx@orange.fr
Subject: Pi-Hole update
Hello there.
version 3
```
```bash
cat test.mail | msmtp --read-envelope-from --read-recipients
cat test.mail | msmtp --account=default --read-envelope-from --read-recipients
```
```
cat /tmp/fichier | mail
```
```
apt install libsecret-tools
```
```bash
gpg --full-generate-key
gpg: /root/.gnupg/trustdb.gpg : base de confiance créée
gpg: répertoire « /root/.gnupg/openpgp-revocs.d » créé
gpg: revocation certificate stored as '/root/.gnupg/openpgp-revocs.d/75199AB29FD34F8BDEA93ABF97857FE7ED14794A.rev'
les clefs publique et secrète ont été créées et signées.
pub rsa3072 2024-03-02 [SC]
75199AB29FD34F8BDEA93ABF97857FE7ED14794A
uid pihole <liste@clicclac.info>
sub rsa3072 2024-03-02 [E]
```
```bash
# gpg --list-secret-keys --keyid-format LONG
gpg: vérification de la base de confiance
gpg: marginals needed: 3 completes needed: 1 trust model: pgp
gpg: profondeur : 0 valables : 1 signées : 0
confiance : 0 i., 0 n.d., 0 j., 0 m., 0 t., 1 u.
/root/.gnupg/pubring.kbx
------------------------
sec rsa3072/97857FE7ED14794A 2024-03-02 [SC]
75199AB29FD34F8BDEA93ABF97857FE7ED14794A
uid [ ultime ] pihole <liste@clicclac.info>
ssb rsa3072/9B43CA525CFA97A6 2024-03-02 [E]
```
https://unix.stackexchange.com/questions/614737/how-to-cache-gpg-key-passphrase-with-gpg-agent-and-keychain-on-debian-10
```bash
# utile ou pas ?
~/.gnupg# mv gnu.conf gpg.conf
```
```
keychain --eval --agents gpg pihole
* keychain 2.8.5 ~ http://www.funtoo.org
* Found existing gpg-agent: 238
GPG_AGENT_INFO=/root/.gnupg/S.gpg-agent:238:1; export GPG_AGENT_INFO;
* Adding 1 gpg key(s): pihole
```
```bash
~# apparmor_parser -R /etc/apparmor.d/usr.bin.msmtp
Cache read/write disabled: interface file missing. (Kernel needs AppArmor 2.4 compatibility patch.)
Avertissement : impossible de trouver un système de fichiers approprié dans /proc/mounts, est-il monté ?
Utilisez --subdomainfs pour remplacer.
# apt install apparmor-utils
~# systemctl enable apparmor
Synchronizing state of apparmor.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable apparmor
~# systemctl status apparmor
○ apparmor.service - Load AppArmor profiles
Loaded: loaded (/lib/systemd/system/apparmor.service; enabled; preset: enabled)
Active: inactive (dead)
Docs: man:apparmor(7)
https://gitlab.com/apparmor/apparmor/wikis/home/
~# apparmor_parser -R /etc/apparmor.d/usr.bin.msmtp
Cache read/write disabled: interface file missing. (Kernel needs AppArmor 2.4 compatibility patch.)
Avertissement : impossible de trouver un système de fichiers approprié dans /proc/mounts, est-il monté ?
Utilisez --subdomainfs pour remplacer.
```
https://discourse.pi-hole.net/t/apparmor-reference-in-unbound-guide-clarification-requested/62351/3
```bash
~# gpg --encrypt --output orange.mail.gpg --recipient liste@clicclac.info orange.mail
~# gpg --decrypt --output file.txt orange.mail.gpg
```

187
docs/Raspberry/pi-hole.md Normal file
View File

@@ -0,0 +1,187 @@
# pi-hole & unbound
#### RasbianOS
| **Version** | **Code name** | **Current status** | **Release date** | **End-of-life ([LTS](https://wiki.debian.org/LTS))** |
| ----------- | ------------- | ------------------ | ---------------- | ---------------------------------------------------- |
| 11 | Bullseye | oldstable | 2021-08-14 | 2024-08-14 (2026-08-31) |
| 12 | Bookworm | stable | 2023-06-10 | 2026-06-10 (2028-06-30) |
| 13 | Trixie | testing | 2025-06-?? | 2028-06-?? (2030-06-??) |
##### Installer Pi-Hole et Unbound:
https://www.crosstalksolutions.com/the-worlds-greatest-pi-hole-and-unbound-tutorial-2023/
https://mediacenterz.com/tutoriel-complete-pi-hole-bloqueur-dannonces-pour-toute-la-maison/
##### Installer Gravity Sync:
https://github.com/vmstan/gravity-sync
##### Installer keepalived:
https://davidshomelab.com/pi-hole-failover-with-keepalived/
##### Version de Pi-Hole
```bash
$ pihole -v
Pi-hole version is v5.17.3 (Latest: v5.17.3)
web version is v5.21 (Latest: v5.21)
FTL version is v5.25 (Latest: v5.25.1)
```
##### Mise-à-jour de Pi-Hole:
```bash
$ pihole -up
```
##### Changer le mot de passe de l'interface Web PiHole
```bash
$ pihole -a -p
```
##### Liste noire:
- **`pihole -b -l`** Liste des domaines sur la liste noire
- **`pihole -b exemple.com`** Ajouter example.com à la liste noire
- **`pihole -b -d example.com`** Supprimer exemple.com de la liste noire
##### Liste blanche:
- **`pihole -w -l`** Liste des domaines dans la liste blanche
- **`pihole -w exemple.com`** Ajouter example.com à la liste blanche
- **`pihole -w -d example.com`** Supprimer exemple.com de la liste blanche
##### Activer / désactiver Pi-Hole:
- **`pihole enable`** Activer PiHole
- **`pihole disable`** Désactiver PiHole en permanence
- **`pihole disable 10m`** Désactiver PiHole pendant 10 minutes
- **`pihole disable 60s`** Désactiver PiHole pendant 1 min
### Activer la résolution locale sur le PiHole (Loopback)
Se connecter sur le Pi-Hole en ssh, puis:
```bash
echo "addn-hosts=/etc/pihole/lan.list" | sudo tee /etc/dnsmasq.d/02-lan.conf
```
On crée le fichier `/etc/pihole/lan.list`
```bash
nano /etc/pihole/lan.list
```
que l'on remplit avec les IP/serveurs
```bash
Adresse IP nom de domaine nom du serveur
192.168.1.xx service.nomdedomaine nomduserveur
```
```bash
192.168.2.57 navidrome.photos-nas.ovh navidrome
192.168.2.57 ds923.photos-nas.ovh dsm
192.168.2.57 maloja.photos-nas.ovh maloja
192.168.2.57 photos.photos-nas.ovh photos
192.168.2.57 change.photos-nas.ovh changedetection
192.168.2.57 search.photos-nas.ovh searxng
192.168.2.57 vault.photos-nas.ovh vaultwarten
192.168.2.1 asus.photos-nas.ovh asus
192.168.2.1 www.asusrouter.com et12
192.168.2.116 pihole1.photos-nas.ovh dietpi1
192.168.2.216 pihole2.photos-nas.ovh dietpi2
```
On redémarre le service DNS:
```bash
pihole restartdns
```
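To check that the local names resolve, query the Pi-hole directly (host and names taken from the lan.list above):
```bash
dig +short navidrome.photos-nas.ovh @192.168.2.116
# expected answer: 192.168.2.57
```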
https://induste.com/threads/utiliser-un-pihole-pour-creer-une-loopback-orange-bouygues-etc.634410/
### Liens:
https://www.reddit.com/r/pihole/comments/tsperl/comment/i2sr22h/
https://discourse.pi-hole.net/t/pihole-unbound-not-working-as-it-should/51381/12
https://docs.pi-hole.net/guides/dns/unbound/#disable-resolvconf-for-unbound-optional
https://alain-michel.canoprof.fr/eleve/tutoriels/raspberry/utiliser-pi-hole-pour-bloquer-les-pubs/
https://nicolasforcet.com/nettoyer-base-de-donnees-pihole-ftldb-log/
https://nicolasforcet.com/raspberry-pi-limiter-drastiquement-les-logs-et-les-mettre-en-ram-pour-preserver-sa-carte-sd/
Using `sudo nmtui` I was able to change my network settings.
The Firebog: The Big Blocklist Collection
https://firebog.net
Gravity Sync:
https://github.com/vmstan/gravity-sync
https://github.com/azlux/log2ram
https://dnscheck.tools
```
vcgencmd get_throttled <-- will give you info on conditions that may have caused throttling.
```
```
Great write up, thanks.
Instead of a firewall rule, blocking all DNS queries except to Pihole, better create a NAT Port Forward rule, so that all DNS queries except router or a Pihole are redirected to the pihole address. Thus DNS resolution will continue working (for allowed domains) even if somebody (or some malicious IoT device) use custom DNS.
I have an OPNsense (pfSense fork) with Unbound installed onboard (192.168.0.1), and a PiHole on another box (192.168.0.100).
All devices by default query DNS from the router (192.168.0.1).
Above mentioned NAT Port Forward rule redirects all DNS queries to the Pihole (192.168.0.100).
Pihole has Unbound on a router as an upstream (192.168.0.1).
SO, unfortunately, there are some networks hops to and fro, but I cant install Pihole on an OPNsense router (its FreeBSD and not Linux).
```
```
Hopefully CrossTalk sees this, but this guide needs to be updated. As of the 10/10/2023 of Debian Bookworm. Debian (and by extension Raspberry Pi OS) does not use dhcpcd as the networking interface. Instead, it uses the more complex/robust NetworkManager. This change can be found in the release notes for RaspberyPi OS here: https://downloads.raspberrypi.com/raspios_lite_arm64/release_notes.txt.
This means that the dhcpcd.conf file will not exist under /etc as the guide suggest. There are ways to use nmcli (the command the interact with NetworkManager) to set a static IP. However, I recommend simply setting up a DHCP reservation using your router. Either way, you cannot set up a static IP using dhcpcd.conf. Hope this helps anyone on their PiHole journey!
```
```
static IP on Bookworm:
credits to https://raspberrypi-guide.github.io/networking/set-up-static-ip-address
So I used nmtui command to set up a static ip on bookworm. Looks like they got rid of dhcpcd by default and are going with NetworkManager.
1. type sudo nmtui so you have the right permissions
2. edit the connection you want
4. change ipv4 config to manual
5. Enter your desired ip address into addresses (with a trailing /24, e.g. 192.168.1.77/24)
6. I put my routers ip in the gateway and DNS fields, and also added a second 8.8.8.8 for DNS
7. Exit out of nmtui
8. reboot and it should work
```

View File

@@ -0,0 +1,151 @@
# Wireguard
### Installer wireguard sur le NAS:
| Model | CPU Model | Cores (each) | Threads (each) | FPU | Package Arch | RAM |
| :----- | :------------------ | :----------- | :------------- | :--- | :----------- | :------------------ |
| DS923+ | AMD Ryzen R1600 | 2 | 4 | ✓ | R1000 | DDR4 ECC SODIMM 4GB |
| DS916+ | Intel Pentium N3710 | 4 | 4 | ✓ | Braswell | DDR3 2GB/8GB |
```bash
sudo docker run --rm --privileged --env PACKAGE_ARCH=r1000 --env DSM_VER=7.2 -v $(pwd):/result_spk synobuild
```
**ne compile pas**
```bash
docker run --rm --privileged --env PACKAGE_ARCH=x64 --env DSM_VER=7.2 -v /volume1/docker/toolkit_tarballs:/toolkit_tarballs -v /volume1/docker/synowirespk71:/result_spk blackvoidclub/synobuild71
docker run --rm --privileged --env PACKAGE_ARCH=x64 --env DSM_VER=7.2 -v /volume1/docker/toolkit_tarballs:/toolkit_tarballs -v /volume1/docker/synowirespk71:/result_spk blackvoidclub/synobuild71
```
### wg-easy
https://github.com/wg-easy/wg-easy
| Création | Portainer (/portainer/compose/29) |
| ----------- | ------------------------------------- |
| Mise-à-jour | Watchtower |
| Ports | 51820:51821 |
| Volume | /volume1/docker/wgeasy:/etc/wireguard |
```yaml
version: "3.5"
services:
wgeasy:
image: ghcr.io/wg-easy/wg-easy:latest
network_mode: "bridge"
container_name: wgeasy
ports:
- "51820:51820/udp"
- "51821:51821"
cap_add:
- NET_ADMIN
- SYS_MODULE
sysctls:
- net.ipv4.conf.all.src_valid_mark=1
- net.ipv4.ip_forward=1
env_file:
- stack.env
labels:
- "com.centurylinklabs.watchtower.enable=true"
volumes:
- /volume1/docker/wgeasy:/etc/wireguard
environment:
- WG_HOST=photos-nas.ovh
- WG_DEFAULT_DNS=192.168.2.216
restart: always
```
Variable d'environnement (dans stack.env)
```yaml
PASSWORD=xxxxxxxxxx
```
##### wg0.conf
```
# Note: Do not edit this file directly.
# Your changes will be overwritten!
# Server
[Interface]
PrivateKey = I5wPTHTxgF[5YyJ@4'Mgwipl+.m=aUB6i[bGAF;p:I|l
Address = 10.8.0.1/24
ListenPort = 51820
PreUp =
PostUp = iptables -t nat -A POSTROUTING -s 10.8.0.0/24 -o eth0 -j MASQUERADE; iptables -A INPUT -p udp -m udp --dport 51820 -j ACCEPT; iptables -A FORWARD -i wg0 -j ACCEPT; iptables -A FORWARD -o wg0 -j ACCEPT;
PreDown =
PostDown =
# Client: ungoutdepomme (557cb02f-6418-465c-8063-866333011ea0)
[Peer]
PublicKey = )RGRG/Aha{XD{sC)YDg(6[kPOF.yaUO1[)QaFlJZg+-P
PresharedKey = H0I/[YLYCU-'r*BiU8HR2KsuiZa{@v*6q#G;yEt6TFJ8
AllowedIPs = 10.8.0.2/32
# Client: airbook (f75ed225-6a8b-4179-a11a-368aec6d2545)
[Peer]
PublicKey = 4!oI:!t9-V:;TxZ{gfJFXuPkS\v2A6+Ka[36.tP=;ao[
PresharedKey = Q.7pJZ32geS|DwZGdQ=O=\D}0XlHUzsk!WE(GD7yGhbB
AllowedIPs = 10.8.0.3/32
```
##### airbook.conf
```
[Interface]
PrivateKey = -dPlB6Glr9IKBo-q(.5X)7Ad+}YuAe9S3):dBPbFLrWX
Address = 10.8.0.3/24
DNS = 192.168.2.216
[Peer]
PublicKey = wXbLtH/'#-;*6"j1ZQgl?.)VMRqMG@g!U38jvw}3(=Yw
PresharedKey = Q.7pJZ32geS|DwZGdQ=O=\D}0XlHUzsk!WE(GD7yGhbB
AllowedIPs = 0.0.0.0/0, ::/0
PersistentKeepalive = 0
Endpoint = photos-nas.ovh:51820
```
##### ungoutdepomme.conf
```
[Interface]
PrivateKey = #Udj:SC,kA0h}MTa7\|as69PV.sS@lpL!'4y34uj?.Z{
Address = 10.8.0.2/24
DNS = 192.168.2.216
[Peer]
PublicKey = ukj!q\:v-70L/Rlr,TJ9];,19,=Uu-1a+O/V|B@OCULq
PresharedKey = H0I/[YLYCU-'r*BiU8HR2KsuiZa{@v*6q#G;yEt6TFJ8
AllowedIPs = 0.0.0.0/0, ::/0
PersistentKeepalive = 0
Endpoint = photos-nas.ovh:51820
```
# `#Token modifié`

View File

@@ -0,0 +1,41 @@
# Pi.Alert
| Création | Portainer |
| ----------- | ---------------------------------------------- |
| Mise-à-jour | Watchtower |
| Ports | 17811 |
| Lien | [Github](https://github.com/pucherot/Pi.Alert) |
```yaml
version: "3.9"
services:
pi.alert:
container_name: Pi.Alert
healthcheck:
test: curl -f http://localhost:17811/ || exit 1
mem_limit: 2g
cpu_shares: 768
security_opt:
- no-new-privileges:true
labels:
- "com.centurylinklabs.watchtower.enable=true"
volumes:
- /volume1/docker/pialert/config:/home/pi/pialert/config:rw
- /volume1/docker/pialert/db:/home/pi/pialert/db:rw
- /volume1/docker/pialert/logs:/home/pi/pialert/front/log:rw
environment:
TZ: Europe/Paris
PORT: 17811
HOST_USER_ID: 1026
HOST_USER_GID: 100
network_mode: host
restart: on-failure:5
image: jokobsk/pi.alert:latest
```

View File

@@ -0,0 +1,26 @@
# network
| | |
| -------------- | ------------------- |
| 172.16.57.0/24 | changeDetection |
| 172.16.58.0/24 | docspell |
| 172.16.59.0/24 | diun / paperlessngx |
| 172.16.60.0/24 | send |
| 172.16.61.0/24 | chibisafe |
| 172.16.62.0/24 | psitransfer |
| 172.16.63.0/24 | pingvin |
| 172.16.64.0/24 | gokapi |
| 172.16.65.0/24 | snippetbox |
| 172.16.66.0/24 | materialious |
| 172.16.67.0/24 | snapdrop |
| 172.16.69.0/24 | seafile |
| 172.16.70.0/24 | kavita |
| | |
| 172.16.72.0/24 | invidious |
| 172.16.73.0/24 | nextcloud |
| 172.16.77.0/24 | maptiler |
| 172.16.78.0/24 | mapserver |
| | |

View File

@@ -0,0 +1,38 @@
# changeDetection
| Création | dockge |
| ----------- | -------------------------------------------------------- |
| Mise-à-jour | Watchtower |
| Ports | 5075 |
| Liens | [Github](https://github.com/dgtlmoon/changedetection.io) |
```yaml
version: '3.2'
services:
changedetection:
image: ghcr.io/dgtlmoon/changedetection.io
container_name: changedetection
hostname: changedetection
labels:
- "com.centurylinklabs.watchtower.enable=true"
volumes:
- changedetection-data:/datastore
ports:
- 5075:5000
volumes:
changedetection-data:
networks:
default:
driver: bridge
ipam:
config:
- subnet: 172.16.57.0/24
```

View File

@@ -0,0 +1,38 @@
# chibisafe
| Création | dockge |
| ----------- | ------------------------------------------------ |
| Mise-à-jour | Watchtower |
| Ports | 24424 |
| Liens | [Github](https://github.com/chibisafe/chibisafe) |
```yaml
version: "3.7"
services:
chibisafe:
image: chibisafe/chibisafe:latest
container_name: chibisafe
volumes:
- /volume1/docker/dockge/stacks/chibisafe/database:/home/node/chibisafe/database:rw
- /volume1/docker/dockge/stacks/chibisafe/uploads:/home/node/chibisafe/uploads:rw
- /volume1/docker/dockge/stacks/chibisafe/logs:/home/node/chibisafe/logs:rw
ports:
- 24424:8000
labels:
- "com.centurylinklabs.watchtower.enable=true"
restart: always
networks:
default:
driver: bridge
ipam:
config:
- subnet: 172.16.61.0/24
```

View File

@@ -0,0 +1,37 @@
# dockge
| Création | Container Manager (projet) |
| ----------- | -------------------------------------------- |
| Mise-à-jour | - |
| Ports | 5011 |
| Liens | [Github](https://github.com/louislam/dockge) |
```yaml
version: "3.8"
services:
dockge:
image: louislam/dockge:1
restart: unless-stopped
ports:
# Host Port : Container Port
- 5011:5001
volumes:
- /var/run/docker.sock:/var/run/docker.sock
    environment:
# Tell Dockge where is your stacks directory
- DOCKGE_STACKS_DIR=/volume1/docker/dockge/stacks
networks:
default:
driver: bridge
ipam:
config:
- subnet: 172.16.58.0/24
```

View File

@@ -0,0 +1,125 @@
# docspell
| Création | Portainer |
| ----------- | --------------------------------- |
| Mise-à-jour | Watchtower |
| Ports | 8486 |
| Liens | [Docspell](https://docspell.org/) |
```yaml
version: '3.9'
services:
docspell-db:
image: postgres
container_name: Docspell-DB
hostname: docspell-db
security_opt:
- no-new-privileges:true
healthcheck:
test: ["CMD", "pg_isready", "-q", "-d", "docspell", "-U", "docspelluser"]
timeout: 45s
interval: 10s
retries: 10
user: 1026:100
volumes:
- /volume1/docker/docspell/db:/var/lib/postgresql/data
environment:
- POSTGRES_DB=docspell
- POSTGRES_USER=docspelluser
- POSTGRES_PASSWORD=docspellpass
restart: always
docspell-solr:
image: solr
command:
- solr-precreate
- docspell
container_name: Docspell-SOLR
hostname: docspell-solr
security_opt:
- no-new-privileges:true
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8983/solr/docspell/admin/ping"]
interval: 45s
timeout: 10s
retries: 2
start_period: 30s
user: 1026:100
volumes:
- /volume1/docker/docspell/solr:/var/solr
restart: always
restserver:
image: docspell/restserver:latest
container_name: Docspell-RESTSERVER
hostname: docspell-restserver
security_opt:
- no-new-privileges:true
user: 1026:100
ports:
- 8486:7880
environment:
- TZ=Europe/Paris
- DOCSPELL_SERVER_INTERNAL__URL=http://docspell-restserver:7880
- DOCSPELL_SERVER_ADMIN__ENDPOINT_SECRET=superdupersecretyeah
- DOCSPELL_SERVER_AUTH_SERVER__SECRET=evenmoresuperdupersecret
- DOCSPELL_SERVER_BACKEND_JDBC_PASSWORD=docspellpass
- DOCSPELL_SERVER_BACKEND_JDBC_URL=jdbc:postgresql://docspell-db:5432/docspell
- DOCSPELL_SERVER_BACKEND_JDBC_USER=docspelluser
- DOCSPELL_SERVER_BIND_ADDRESS=0.0.0.0
- DOCSPELL_SERVER_FULL__TEXT__SEARCH_ENABLED=true
- DOCSPELL_SERVER_FULL__TEXT__SEARCH_SOLR_URL=http://docspell-solr:8983/solr/docspell
- DOCSPELL_SERVER_INTEGRATION__ENDPOINT_ENABLED=true
- DOCSPELL_SERVER_INTEGRATION__ENDPOINT_HTTP__HEADER_ENABLED=true
- DOCSPELL_SERVER_INTEGRATION__ENDPOINT_HTTP__HEADER_HEADER__VALUE=superduperpassword123
- DOCSPELL_SERVER_BACKEND_SIGNUP_MODE=open
- DOCSPELL_SERVER_BACKEND_SIGNUP_NEW__INVITE__PASSWORD=
- DOCSPELL_SERVER_BACKEND_ADDONS_ENABLED=false
restart: always
depends_on:
docspell-db:
condition: service_started
docspell-solr:
condition: service_healthy
joex:
image: docspell/joex:latest
container_name: Docspell-JOEX
hostname: docspell-joex
security_opt:
- no-new-privileges:true
user: 1026:100
environment:
- TZ=Europe/Paris
- DOCSPELL_JOEX_APP__ID=joex1
- DOCSPELL_JOEX_PERIODIC__SCHEDULER_NAME=joex1
- DOCSPELL_JOEX_SCHEDULER_NAME=joex1
- DOCSPELL_JOEX_BASE__URL=http://docspell-joex:7878
- DOCSPELL_JOEX_BIND_ADDRESS=0.0.0.0
- DOCSPELL_JOEX_FULL__TEXT__SEARCH_ENABLED=true
- DOCSPELL_JOEX_FULL__TEXT__SEARCH_SOLR_URL=http://docspell-solr:8983/solr/docspell
- DOCSPELL_JOEX_JDBC_PASSWORD=docspellpass
- DOCSPELL_JOEX_JDBC_URL=jdbc:postgresql://docspell-db:5432/docspell
- DOCSPELL_JOEX_JDBC_USER=docspelluser
- DOCSPELL_JOEX_ADDONS_EXECUTOR__CONFIG_RUNNER=docker,trivial
- DOCSPELL_JOEX_CONVERT_HTML__CONVERTER=weasyprint
restart: always
depends_on:
docspell-db:
condition: service_started
docspell-solr:
condition: service_healthy
networks:
default:
driver: bridge
ipam:
config:
- subnet: 172.16.58.0/24
```

View File

@@ -0,0 +1,36 @@
# dozzle
| Created with | Portainer |
| ----------- | ----------- |
| Updated by | Watchtower |
| Ports | 9999 |
| Links | [Github](https://github.com/amir20/dozzle) |
```yaml
name: dozzle
services:
dozzle:
container_name: dozzle
ports:
- 9999:8080
environment:
- PUID=1026
- PGID=100
- TZ=Europe/Paris
labels:
- "com.centurylinklabs.watchtower.enable=true"
volumes:
- /var/run/docker.sock:/var/run/docker.sock
restart: always
image: amir20/dozzle:latest
```

View File

@@ -0,0 +1,30 @@
# freegeoip
| Created with | dockge |
| ----------- | ----------- |
| Updated by | Watchtower |
| Ports | 8080 / 8888 |
| Links | [Github](https://github.com/t0mer/freegeoip) |
```yaml
version: "2.1"
services:
freegeoip:
image: techblog/freegeoip
container_name: freegeoip
ports:
- 8080:8080
- 8888:8888
labels:
- com.centurylinklabs.watchtower.enable=true
restart: always
networks: {}
```
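Once the container is up, the API can be checked with curl. This assumes the image keeps the classic freegeoip `/json/<ip>` endpoint and answers on port 8080; adjust to 8888 if your instance serves the API there.
```bash
# Query the local freegeoip API for a test address (endpoint assumed from the classic freegeoip API)
curl -s http://localhost:8080/json/8.8.8.8
```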

View File

@@ -0,0 +1,65 @@
# glances
| Created with | Portainer |
| ----------- | ----------- |
| Updated by | Watchtower |
| Ports | 61208 |
| Links | [Github](https://github.com/nicolargo/glances)<br />[Docker](https://github.com/nicolargo/glances/blob/develop/docs/docker.rst) |
```yaml
version: "3.9"
services:
glances:
container_name: Glances
image: nicolargo/glances:latest-full
healthcheck:
test: curl -f http://localhost:61208/ || exit 1
mem_limit: 4g
cpu_shares: 768
security_opt:
- no-new-privileges:true
pid: host
privileged: true
network_mode: host
restart: on-failure:5
ports:
- 61208:61208
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
environment:
GLANCES_OPT: -w
```
By default, the /etc/glances/glances.conf file is used (based on docker-compose/glances.conf).
Additionally, if you want to use your own glances.conf file, you can create your own Dockerfile:
```
FROM nicolargo/glances:latest
COPY glances.conf /root/.config/glances/glances.conf
CMD python -m glances -C /root/.config/glances/glances.conf $GLANCES_OPT
```
Alternatively, you can specify something along the same lines with docker run options (notice the GLANCES_OPT environment variable setting parameters for the glances startup command):
```
docker run -e TZ="${TZ}" -v `pwd`/glances.conf:/root/.config/glances/glances.conf -v /var/run/docker.sock:/var/run/docker.sock:ro -v /run/user/1000/podman/podman.sock:/run/user/1000/podman/podman.sock:ro --pid host -e GLANCES_OPT="-C /root/.config/glances/glances.conf" -it nicolargo/glances:latest-full
```
Here `pwd`/glances.conf is the path to your local glances.conf file.
glances.conf
https://github.com/nicolargo/glances/blob/develop/conf/glances.conf

View File

@@ -0,0 +1,37 @@
# gokapi
| Created with | dockge |
| ----------- | ----------- |
| Updated by | Watchtower |
| Ports | 53842 |
| Links | [Github](https://github.com/Forceu/Gokapi) |
```yaml
version: "3.7"
services:
gokapi:
container_name: gokapi
volumes:
- /volume1/docker/dockge/stacks/gokapi/data:/app/data
- /volume1/docker/dockge/stacks/gokapi/config:/app/config
ports:
- 53842:53842
labels:
- "com.centurylinklabs.watchtower.enable=true"
image: f0rc3/gokapi:latest
restart: always
networks:
default:
driver: bridge
ipam:
config:
- subnet: 172.16.64.0/24
```

View File

@@ -0,0 +1,36 @@
# Heimdall
| Created with | dockge |
| ----------- | ----------- |
| Updated by | Watchtower |
| Ports | 8056 |
| Links | [Heimdall](https://heimdall.site/) |
```yaml
name: heimdall
services:
heimdall:
container_name: heimdall
ports:
- 8056:80
- 7543:443
environment:
- PUID=1026
- PGID=100
- TZ=Europe/Paris
labels:
- com.centurylinklabs.watchtower.enable=true
volumes:
- /volume1/docker/dockge/stacks/heimdall:/config
restart: always
image: ghcr.io/linuxserver/heimdall
networks: {}
```

View File

@@ -0,0 +1,68 @@
# igotify
| Created with | Portainer |
| ----------- | ----------- |
| Updated by | Watchtower |
| Ports | 8680 |
| Links | [Github](https://github.com/androidseb25/iGotify-Notification-Assistent)<br />[Gotify](https://gotify.net) |
```yaml
version: '3.8'
services:
gotify:
container_name: gotify
hostname: gotify
image: ghcr.io/gotify/server:latest
restart: unless-stopped
security_opt:
- no-new-privileges:true
networks:
- net
ports:
- "8680:80"
volumes:
- igotify-data:/app/data
labels:
- "com.centurylinklabs.watchtower.enable=true"
environment:
TZ: Europe/Paris
GOTIFY_DEFAULTUSER_NAME: bruno
GOTIFY_DEFAULTUSER_PASS: 3l.+-OGj8feS*C7b
igotify:
container_name: igotify
hostname: igotify
image: ghcr.io/androidseb25/igotify-notification-assist:latest
restart: unless-stopped
security_opt:
- no-new-privileges:true
pull_policy: always
networks:
- net
ports:
- "8681:8080"
volumes:
- igotify-api-data:/app/data
labels:
- "com.centurylinklabs.watchtower.enable=true"
environment:
      IGOTIFY_CLIENT_TOKEN: 'Zm1BPb.iqe,!r=\' # create a client in Gotify and add its client token here
      GOTIFY_SERVER_URL: 'http://gotify' # default container name of the Gotify server
networks:
net:
volumes:
igotify-data:
igotify-api-data:
# tokens changed
```
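With Gotify reachable on port 8680, a test notification can be pushed using an application token created in the Gotify web UI (the token below is a placeholder, not a real one):
```bash
# Push a test message to Gotify; replace <apptoken> with an application token from the web UI
curl "http://localhost:8680/message?token=<apptoken>" \
  -F "title=Test" \
  -F "message=Hello from the NAS"
```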

View File

@@ -0,0 +1,86 @@
# invidious
| Created with | Portainer |
| ----------- | ----------- |
| Updated by | Watchtower |
| Ports | 7601 |
| Links | [Github](https://github.com/iv-org/invidious) |
```yaml
version: "3.9"
services:
invidious-db:
image: postgres
container_name: Invidious-DB
hostname: invidious-db
security_opt:
- no-new-privileges:true
healthcheck:
test: ["CMD", "pg_isready", "-q", "-d", "invidious", "-U", "kemal"]
timeout: 45s
interval: 10s
retries: 10
user: 1026:100
labels:
- com.centurylinklabs.watchtower.enable=true
volumes:
- /volume1/docker/invidiousdb:/var/lib/postgresql/data
environment:
POSTGRES_DB: invidious
POSTGRES_USER: kemal
POSTGRES_PASSWORD: kemalpw
restart: always
invidious:
image: quay.io/invidious/invidious:latest
container_name: Invidious
hostname: invidious
user: 1026:100
security_opt:
- no-new-privileges:true
healthcheck:
test: wget -nv --tries=1 --spider http://127.0.0.1:3000/api/v1/comments/jNQXAC9IVRw || exit 1
interval: 30s
timeout: 5s
retries: 2
ports:
- 7601:3000
labels:
- com.centurylinklabs.watchtower.enable=true
environment:
INVIDIOUS_CONFIG: |
db:
dbname: invidious
user: kemal
password: kemalpw
host: invidious-db
port: 5432
check_tables: true
captcha_enabled: false
default_user_preferences:
locale: fr
region: FR
external_port: 443
domain: invidious.photos-nas.ovh
hmac_key: Kh9d0h2tV1wIVbqUHTCR5EOxcrc6iB9zLu4UGqIpfXKHjGlksKUWsMyOUw0YVJdC
https_only: true
restart: always
depends_on:
invidious-db:
condition: service_started
networks:
default:
driver: bridge
ipam:
config:
- subnet: 172.16.72.0/24
# token changed
```

View File

@@ -0,0 +1,70 @@
# maptiler
```yaml
version: "2"
services:
tileserver:
restart: always
image: maptiler/tileserver-gl
container_name: maptiler
privileged: false
ports:
- 8580:8080
volumes:
- /volume1/docker/dockge/stacks/maptiler/data/map:/data
command:
- --verbose
- --mbtiles
- maptiler-osm-2020-02-10-v3.11-europe.mbtiles
networks:
default:
driver: bridge
ipam:
config:
- subnet: 172.16.77.0/24
```
https://tileserver.readthedocs.io/en/latest/index.html
https://blog.tmlmt.com/create-style-and-render-self-hosted-vector-maps/
https://download.geofabrik.de
```
command: ["-p", "80", "-c", "/data/config.json"]
docker run --rm -it -v $(pwd):/data -p 8188:8080 maptiler/tileserver-gl:v3.1.1 -c config.json --verbose
```
```
version: '3.4'
services:
openmaptiles:
image: klokantech/tileserver-gl
ports:
- 8080:80
volumes:
- "./data:/data"
command: "--verbose -c config.json"
```
## Reloading the configuration
It is possible to reload the configuration file without restarting the whole process by sending a SIGHUP signal to the node process (see the example below for this stack).
- `docker kill -s HUP tileserver-gl` can be used when running the tileserver-gl docker container.
- `docker-compose kill -s HUP tileserver-gl-service-name` can be used when tileserver-gl is run as a docker-compose service.
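For the container defined above (`container_name: maptiler`), the reload looks like this:
```bash
# Reload the TileServer GL configuration without restarting the container
docker kill -s HUP maptiler
```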

View File

@@ -0,0 +1,82 @@
# navidrome - maloja
| Created with | Portainer |
| ----------- | ----------- |
| Updated by | Watchtower |
| Navidrome port | 4533 |
| Maloja port | 42010 |
| Links | [Navidrome](https://www.navidrome.org/)<br />[Github](https://github.com/krateng/maloja) |
```yaml
version: "3"
services:
navidrome:
image: deluan/navidrome
container_name: navidrome
    user: 1028:65536 # change to your own PUID and PGID
ports:
- "4533:4533" # modifier votre IP donnant vers l'exterieur
environment:
# Optional: put your config options customization here. Examples:
ND_SCANSCHEDULE: 1h
ND_SESSIONTIMEOUT: "24h"
ND_LOGLEVEL: debug
ND_BASEURL: ""
ND_DEFAULTLANGUAGE: fr
ND_ENABLEEXTERNALSERVICES: true
ND_ENABLELOGREDACTING: true
ND_ENABLECOVERANIMATION: false
ND_AUTHREQUESTLIMIT: 10
ND_LISTENBRAINZ_ENABLED: true
ND_LISTENBRAINZ_BASEURL: "http://maloja:42010/apis/listenbrainz/1/"
#ND_LISTENBRAINZ_BASEURL: "https://maloja.photos-nas.ovh/apis/mlj_1/newscrobble"
ND_LASTFM_ENABLED: true
ND_LASTFM_LANGUAGE: fr
ND_ENABLEFAVOURITES: true
ND_ENABLESTARRATING: true
ND_ENABLEUSEREDITING: true
#ND_ENABLEDOWNSAMPLING: "true"
      #ND_MAXBITRATE: 128 # can be anything less than your Ogg files bitrate
#ND_DOWNSAMPLECOMMAND: "ffmpeg -i %s -map 0:0 -b:a %bk -v 0 -c:a libopus -f opus -"
ND_PORT: 4533
env_file:
- stack.env
volumes:
- "/volume1/docker/navidrome:/data"
- "/volume1/music:/music:ro"
labels:
- "com.centurylinklabs.watchtower.enable=true"
#- com.centurylinklabs.watchtower.depends-on=
#- "diun.enable=true"
restart: unless-stopped
maloja:
# from dockerhub
image: "krateng/maloja:latest"
container_name: maloja
ports:
- "42010:42010"
restart: unless-stopped
# different directories for configuration, state and logs
volumes:
- "/volume1/docker/maloja/data:/data"
environment:
- "MALOJA_DATA_DIRECTORY=/data"
- "PUID=1028"
- "PGID=65536"
- "MALOJA_LOGGING=true"
env_file:
- stack.env
labels:
- "com.centurylinklabs.watchtower.enable=true"
#- com.centurylinklabs.watchtower.depends-on=
#- "diun.enable=true"
```

View File

@@ -0,0 +1,39 @@
# openstreetmap-tile-server
https://github.com/Overv/openstreetmap-tile-server
Create a Docker volume to hold the PostgreSQL database that will contain the OpenStreetMap data:
```bash
docker volume create osm-data
```
Download a `.osm.pbf` extract from geofabrik.de for the region you are interested in:
```bash
cd /volume1/docker/dockge/stacks/
mkdir mapserver
cd mapserver
mkdir data
```
```bash
wget http://download.geofabrik.de/france-latest.osm.pbf
wget http://download.geofabrik.de/france.poly
```
Import it into PostgreSQL by running a container and mounting the file as `/data/region.osm.pbf`.
```bash
docker run \
-v /volume1/docker/dockge/stacks/mapserver/data/france-latest.osm.pbf:/data/region.osm.pbf \
-v osm-data:/data/database/ \
overv/openstreetmap-tile-server \
import
```
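Once the import has finished, the tile server can be started against the same volume; this sketch follows the upstream README (double-check the image documentation for your version):
```bash
# Serve tiles on port 8080 from the previously imported osm-data volume
docker run \
  -p 8080:80 \
  -v osm-data:/data/database/ \
  -d overv/openstreetmap-tile-server \
  run
```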

View File

@@ -0,0 +1,145 @@
# paperlessngx
| Created with | Portainer |
| ----------- | ----------- |
| Updated by | Watchtower |
| Port | 8777 |
| Links | [Paperless-ngx](https://docs.paperless-ngx.com/)<br />[Github](https://github.com/paperless-ngx/paperless-ngx) |
```yaml
version: "3.9"
services:
redis:
image: redis:7
command:
- /bin/sh
- -c
- redis-server --requirepass redispass
container_name: PaperlessNGX-REDIS
hostname: paper-redis
mem_limit: 512m
mem_reservation: 256m
cpu_shares: 768
security_opt:
- no-new-privileges:true
read_only: true
user: 1026:100
healthcheck:
test: ["CMD-SHELL", "redis-cli ping || exit 1"]
volumes:
- /volume1/docker/paperlessngx/redis:/data:rw
environment:
TZ: Europe/Paris
restart: on-failure:5
db:
image: postgres:16
container_name: PaperlessNGX-DB
hostname: paper-db
mem_limit: 1g
cpu_shares: 768
security_opt:
- no-new-privileges:true
healthcheck:
test: ["CMD", "pg_isready", "-q", "-d", "paperless", "-U", "paperlessuser"]
timeout: 45s
interval: 10s
retries: 10
volumes:
- /volume1/docker/paperlessngx/db:/var/lib/postgresql/data:rw
environment:
POSTGRES_DB: paperless
POSTGRES_USER: paperlessuser
POSTGRES_PASSWORD: paperlesspass
restart: on-failure:5
gotenberg:
image: gotenberg/gotenberg:latest
container_name: PaperlessNGX-GOTENBERG
hostname: gotenberg
security_opt:
- no-new-privileges:true
user: 1026:100
command:
- "gotenberg"
- "--chromium-disable-javascript=true"
- "--chromium-allow-list=file:///tmp/.*"
restart: on-failure:5
tika:
image: ghcr.io/paperless-ngx/tika:latest
container_name: PaperlessNGX-TIKA
hostname: tika
security_opt:
- no-new-privileges:true
user: 1026:100
restart: on-failure:5
paperless:
image: ghcr.io/paperless-ngx/paperless-ngx:latest
container_name: PaperlessNGX
hostname: paperless-ngx
mem_limit: 6g
cpu_shares: 1024
security_opt:
- no-new-privileges:true
healthcheck:
test: ["CMD", "curl", "-fs", "-S", "--max-time", "2", "http://localhost:8000"]
interval: 30s
timeout: 10s
retries: 5
ports:
- 8777:8000
volumes:
- /volume1/docker/paperlessngx/data:/usr/src/paperless/data:rw
- /volume1/docker/paperlessngx/media:/usr/src/paperless/media:rw
- /volume1/docker/paperlessngx/export:/usr/src/paperless/export:rw
- /volume1/docker/paperlessngx/consume:/usr/src/paperless/consume:rw
- /volume1/docker/paperlessngx/trash:/usr/src/paperless/trash:rw
environment:
PAPERLESS_REDIS: redis://:redispass@paper-redis:6379
PAPERLESS_DBENGINE: postgresql
PAPERLESS_DBHOST: paper-db
PAPERLESS_DBNAME: paperless
PAPERLESS_DBUSER: paperlessuser
PAPERLESS_DBPASS: paperlesspass
PAPERLESS_TRASH_DIR: ../trash
PAPERLESS_FILENAME_FORMAT: '{created_year}/{correspondent}/{document_type}/{title}'
PAPERLESS_OCR_ROTATE_PAGES_THRESHOLD: 6
PAPERLESS_TASK_WORKERS: 1
USERMAP_UID: 1026
USERMAP_GID: 100
PAPERLESS_TIME_ZONE: Europe/Paris
PAPERLESS_URL: https://paperlessngx.photos-nas.ovh
PAPERLESS_CSRF_TRUSTED_ORIGINS: https://paperlessngx.photos-nas.ovh
PAPERLESS_OCR_LANGUAGE: deu+eng
PAPERLESS_TIKA_ENABLED: 1
PAPERLESS_TIKA_GOTENBERG_ENDPOINT: http://gotenberg:3000
PAPERLESS_TIKA_ENDPOINT: http://tika:9998
env_file:
- stack.env
restart: on-failure:5
depends_on:
db:
condition: service_healthy
redis:
condition: service_healthy
tika:
condition: service_started
gotenberg:
condition: service_started
networks:
default:
driver: bridge
ipam:
config:
- subnet: 172.16.59.0/24
```
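After the first start, an admin account still has to be created. With the service name used above (`paperless`), something along these lines should work, since the paperless-ngx entrypoint forwards extra arguments to `manage.py` (run it from the stack directory):
```bash
# Create the initial Paperless-ngx superuser account
docker compose run --rm paperless createsuperuser
```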

View File

@@ -0,0 +1,37 @@
# pingvin
| Created with | dockge |
| ----------- | ----------- |
| Updated by | Watchtower |
| Port | 6090 |
| Links | [Github](https://github.com/stonith404/pingvin-share) |
```yaml
version: "3.7"
services:
pingvin-share:
container_name: Pingvin-Share
ports:
- 6090:3000
volumes:
- /volume1/docker/dockge/stacks/pingvin:/opt/app/backend/data
- /volume1/docker/dockge/stacks/pingvin/public:/opt/app/frontend/public/img
restart: always
image: stonith404/pingvin-share
labels:
- "com.centurylinklabs.watchtower.enable=true"
networks:
default:
driver: bridge
ipam:
config:
- subnet: 172.16.63.0/24
```

View File

@@ -0,0 +1,41 @@
# PrivateBin
| Created with | dockge |
| ----------- | ----------- |
| Updated by | Watchtower |
| Port | 8380 |
| Links | [PrivateBin](https://privatebin.info/) |
```yaml
name: privatebin
services:
nginx-fpm-alpine:
restart: always
read_only: true
environment:
TZ: Europe/Paris
ports:
- 8380:8080
labels:
- com.centurylinklabs.watchtower.enable=true
volumes:
- /volume1/docker/dockge/stacks/privatebin/conf.php:/srv/cfg/conf.php:ro
- /volume1/docker/dockge/stacks/privatebin/data:/srv/data
- /volume1/docker/dockge/stacks/privatebin:/tmp
- /volume1/docker/dockge/stacks/privatebin:/run
image: privatebin/nginx-fpm-alpine
networks:
default:
driver: bridge
ipam:
config:
- subnet: 172.16.66.0/24
```

View File

@@ -0,0 +1,35 @@
# psitransfer
| Created with | dockge |
| ----------- | ----------- |
| Updated by | Watchtower |
| Port | 3005 |
| Links | [Github](https://github.com/psi-4ward/psitransfer) |
```yaml
version: "3.7"
services:
psitransfer:
container_name: psitransfer
volumes:
- /volume1/docker/dockge/stacks/psitransfer/data:/data
ports:
- 3005:3000
labels:
- com.centurylinklabs.watchtower.enable=true
image: psitrax/psitransfer
networks:
default:
driver: bridge
ipam:
config:
- subnet: 172.16.62.0/24
```

View File

@@ -0,0 +1,49 @@
# Scrutiny
| Created with | Container Manager (project) |
| ----------- | ----------- |
| Updated by | Watchtower |
| Port | 6070 |
| Links | [Github](https://github.com/AnalogJ/scrutiny) |
```yaml
services:
scrutiny:
container_name: scrutiny
image: ghcr.io/analogj/scrutiny:master-omnibus
cap_add:
- SYS_RAWIO
- SYS_ADMIN
ports:
- "6070:8080" # webapp
- "8086:8086" # influxDB administration
volumes:
- /run/udev:/run/udev:ro
- /volume1/docker/scrutiny:/opt/scrutiny/config
- /volume1/docker/scrutiny/influxdb:/opt/scrutiny/influxdb
devices:
# - /dev/nvme0n1:/dev/nvme0n1
# - /dev/nvme1n1:/dev/nvme1n1
- /dev/sata1:/dev/sata1
- /dev/sata2:/dev/sata2
- /dev/sata3:/dev/sata3
- /dev/sata4:/dev/sata4
# - /dev/sata5:/dev/sata5
# - /dev/sata6:/dev/sata6
# - /dev/sata7:/dev/sata7
# - /dev/sata8:/dev/sata8
environment:
- SCRUTINY_WEB_INFLUXDB_TOKEN='eo5Kc?t9T/Yrl054Edh6bJYNbhOH3blnWHqDcLHc4ml2ur/IF6?pR1v4BHd!bfB01Qu4pQyPs!?AiBa-8WGoSrkpjdQ'
- SCRUTINY_WEB_INFLUXDB_INIT_USERNAME='bruno'
- SCRUTINY_WEB_INFLUXDB_INIT_PASSWORD='j8s!hJVWCuu*z*LU'
- TIMEZONE=Europe/Paris
restart: unless-stopped
```
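The `devices` list has to match the drives actually present on the NAS; on Synology they usually show up as `/dev/sataN` (plus `/dev/nvmeXn1` for NVMe). A quick check before editing the list:
```bash
# List the block devices that can be passed to the Scrutiny container
ls /dev/sata* /dev/nvme* 2>/dev/null
```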

View File

@@ -0,0 +1,82 @@
# seafile
| Created with | dockge |
| ----------- | ----------- |
| Updated by | Watchtower |
| Port | 8611 |
| Links | [Manual](https://manual.seafile.com/)<br />[Seafile](https://www.seafile.com/en/home/)<br />https://mariushosting.com/how-to-install-seafile-on-your-synology-nas/ |
```yaml
version: "3.9"
services:
db:
image: mariadb:11.3-jammy
container_name: Seafile-DB
hostname: seafile-db
mem_limit: 1g
cpu_shares: 768
security_opt:
- no-new-privileges:true
user: 1026:100
volumes:
- /volume1/docker/dockge/stacks/seafile/db:/var/lib/mysql:rw
environment:
MYSQL_ROOT_PASSWORD: rootpass
TZ: Europe/Paris
restart: on-failure:5
cache:
image: memcached:1.6.22
entrypoint: memcached -m 256
container_name: Seafile-CACHE
hostname: memcached
mem_limit: 512m
cpu_shares: 768
security_opt:
- no-new-privileges:true
read_only: true
user: 1026:100
restart: on-failure:5
seafile:
image: seafileltd/seafile-mc:latest
container_name: Seafile
hostname: seafile
mem_limit: 2g
cpu_shares: 768
security_opt:
- no-new-privileges:true
healthcheck:
test: wget --no-verbose --tries=1 --spider http://localhost
volumes:
- /volume1/docker/dockge/stacks/seafile/data:/shared:rw
ports:
- 8611:80
environment:
DB_HOST: seafile-db
DB_ROOT_PASSWD: rootpass
TIME_ZONE: Europe/Paris
SEAFILE_ADMIN_EMAIL: liste@clicclac.info
SEAFILE_ADMIN_PASSWORD: mariushosting
SEAFILE_SERVER_LETSENCRYPT: false
SEAFILE_SERVER_HOSTNAME: seafile.photos-nas.ovh
FORCE_HTTPS_IN_CONF: true
restart: on-failure:5
depends_on:
db:
condition: service_started
cache:
condition: service_started
networks:
default:
driver: bridge
ipam:
config:
- subnet: 172.16.69.0/24
```

View File

@@ -0,0 +1,33 @@
# searXNG
| Created with | dockge |
| ----------- | ----------- |
| Updated by | Watchtower |
| Port | 5147 |
| Links | [Docs](https://docs.searxng.org)<br />[Github](https://github.com/searxng/searxng) |
```yaml
version: '3.9'
services:
searxng:
image: searxng/searxng
container_name: SearXNG
mem_limit: 8g
cpu_shares: 2048
security_opt:
- no-new-privileges:true
labels:
- "com.centurylinklabs.watchtower.enable=true"
volumes:
- /volume1/docker/searxng:/etc/searxng:rw
restart: on-failure:5
ports:
- 5147:8080
```
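SearXNG expects a `secret_key` in the `settings.yml` stored in the mounted `/volume1/docker/searxng` folder; a random value can be generated like this and pasted into that file:
```bash
# Generate a random secret for the server.secret_key setting
openssl rand -hex 32
```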

View File

@@ -0,0 +1,59 @@
# send
| Created with | dockge |
| ----------- | ----------- |
| Updated by | Watchtower |
| Port | 1234 |
| Links | [Github](https://github.com/timvisee/send)<br />[Docker](https://github.com/timvisee/send-docker-compose/tree/master) |
```yaml
version: "3"
services:
send:
image: registry.gitlab.com/timvisee/send:latest
restart: always
ports:
- 1234:1234
volumes:
- /volume1/docker/dockge/stacks/send/uploads:/uploads
labels:
- com.centurylinklabs.watchtower.enable=true
environment:
- VIRTUAL_HOST=send.photos-nas.ovh
- VIRTUAL_PORT=1234
- DHPARAM_GENERATION=false
- NODE_ENV=production
- BASE_URL=https://send.photos-nas.ovh
- PORT=1234
- REDIS_HOST=redis
- FILE_DIR=/uploads
# To customize upload limits
# - EXPIRE_TIMES_SECONDS=3600,86400,604800,2592000,31536000
# - DEFAULT_EXPIRE_SECONDS=3600
# - MAX_EXPIRE_SECONDS=31536000
# - DOWNLOAD_COUNTS=1,2,5,10,15,25,50,100,1000
# - MAX_DOWNLOADS=1000
# - MAX_FILE_SIZE=2684354560
redis:
image: redis:alpine
restart: always
volumes:
- send-redis:/data
volumes:
send-redis: null
networks:
default:
driver: bridge
ipam:
config:
- subnet: 172.16.60.0/24
```
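Uploads can also be done from the command line with ffsend, the companion CLI from the same author; assuming it is installed, pointing it at this instance looks roughly like this:
```bash
# Upload a file through the self-hosted Send instance (ffsend installed separately)
ffsend upload --host https://send.photos-nas.ovh/ document.pdf
```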

View File

@@ -0,0 +1,41 @@
# snapdrop
| Created with | dockge |
| ----------- | ----------- |
| Updated by | Watchtower |
| Port | 7653 |
| Links | [Github](https://github.com/RobinLinus/snapdrop) |
```yaml
version: "3.9"
services:
snapdrop:
image: ghcr.io/linuxserver/snapdrop:latest
container_name: Snapdrop
hostname: snapdrop
mem_limit: 1g
cpu_shares: 768
security_opt:
- no-new-privileges:true
healthcheck:
test: wget --no-verbose --tries=1 --spider http://localhost:80
ports:
- 7653:443
labels:
- com.centurylinklabs.watchtower.enable=true
volumes:
- /volume1/docker/dockge/stacks/snapdrop:/config:rw
environment:
TZ: Europe/Paris
PUID: 1026
PGID: 100
restart: on-failure:5
```

View File

@@ -0,0 +1,36 @@
# snippet-box
| Created with | dockge |
| ----------- | ----------- |
| Updated by | Watchtower |
| Port | 5010 |
| Links | [Github](https://github.com/pawelmalak/snippet-box) |
```yaml
version: "3"
services:
snippet-box:
image: pawelmalak/snippet-box:latest
container_name: snippet-box
labels:
- com.centurylinklabs.watchtower.enable=true
volumes:
- /volume1/docker/dockge/stacks/snippetbox/data:/app/data
ports:
- 5010:5000
restart: unless-stopped
networks:
default:
driver: bridge
ipam:
config:
- subnet: 172.16.65.0/24
```

View File

@@ -0,0 +1,57 @@
# Tautulli
| Created with | Portainer |
| ----------- | ----------- |
| Updated by | Watchtower |
| Port | 8181 |
| Links | [Github](https://github.com/Tautulli/Tautulli)<br />[Tautulli](https://tautulli.com/) |
```yaml
services:
tautulli:
image: linuxserver/tautulli:latest
container_name: tautulli
environment:
- PUID=1026 #CHANGE_TO_YOUR_UID
- PGID=100 #CHANGE_TO_YOUR_GID
- TZ=Europe/Paris #CHANGE_TO_YOUR_TZ
- UMASK=022
labels:
- "com.centurylinklabs.watchtower.enable=true"
volumes:
- /volume1/docker/tautulli:/config
ports:
- 8181:8181/tcp
network_mode: synobridge
security_opt:
- no-new-privileges:true
restart: always
```
#### Login Portal -> Advanced -> Reverse Proxy
Custom header:
| Header name | Value |
| ----------------- | --------------------------- |
| Host | $host; |
| X-Real-IP | $remote_addr; |
| X-Forwarded-Host | $server_name; |
| X-Forwarded-For | $proxy_add_x_forwarded_for; |
| X-Forwarded-Proto | $scheme; |
| X-Forwarded-Ssl | on; |
https://github.com/Tautulli/Tautulli/wiki/Installation#synology
https://github.com/Tautulli/Tautulli/wiki/Frequently-Asked-Questions#general-q9

View File

@@ -0,0 +1,43 @@
# Watchtower
| Created with | Portainer |
| ----------- | ----------- |
| Updated by | Watchtower |
| Port | - |
| Links | [Watchtower](https://containrrr.dev/watchtower/)<br />[Github](https://github.com/containrrr/watchtower/) |
```yaml
version: '2.1'
services:
watchtower:
image: containrrr/watchtower
container_name: watchtower
hostname: watchtower-nas
network_mode: bridge
environment:
- WATCHTOWER_NOTIFICATIONS=email
- WATCHTOWER_CLEANUP=true
- WATCHTOWER_DEBUG=true
- WATCHTOWER_LABEL_ENABLE=true
- WATCHTOWER_TIMEOUT=30s
#- WATCHTOWER_POLL_INTERVAL=300
- WATCHTOWER_SCHEDULE=0 0 5 * * *
- TZ=Europe/Paris
env_file:
- stack.env
labels:
- "com.centurylinklabs.watchtower.enable=true"
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- /volume1/docker/watchtower/config.json:/root/.docker/config.json
restart: unless-stopped
```

View File

@@ -0,0 +1,44 @@
# wg-easy
| Created with | Portainer |
| ----------- | ----------- |
| Updated by | Watchtower |
| Port | 51821 |
| Links | [Github](https://github.com/wg-easy/wg-easy) |
```yaml
version: "3.5"
services:
wgeasy:
image: ghcr.io/wg-easy/wg-easy:latest
network_mode: "bridge"
container_name: wgeasy
ports:
- "51820:51820/udp"
- "51821:51821"
cap_add:
- NET_ADMIN
- SYS_MODULE
sysctls:
- net.ipv4.conf.all.src_valid_mark=1
- net.ipv4.ip_forward=1
env_file:
- stack.env
labels:
- "com.centurylinklabs.watchtower.enable=true"
volumes:
- /volume1/docker/wgeasy:/etc/wireguard
environment:
- WG_HOST=photos-nas.ovh
- WG_DEFAULT_DNS=192.168.2.216
restart: always
```

View File

@@ -0,0 +1,35 @@
# yacy
| Created with | Portainer |
| ----------- | ----------- |
| Updated by | Watchtower |
| Port | 8490 |
| Links | [Yacy](https://yacy.net)<br />[Github](https://github.com/yacy/yacy_search_server) |
```yaml
services:
yacy:
image: yacy/yacy_search_server:latest
container_name: yacy
network_mode: bridge
dns:
- 192.168.2.116
- 192.168.2.216
user: 1028:65536
labels:
- com.centurylinklabs.watchtower.enable=true
ports:
- 8490:8090
- 8443:8443
volumes:
- /volume1/docker/yacy/data:/opt/yacy_search_server/DATA:rw
restart: unless-stopped
```

View File

@@ -0,0 +1,25 @@
# yacht
```yaml
version: "3"
services:
yacht:
container_name: yacht
restart: unless-stopped
ports:
- 8001:8000
environment:
ADMIN_EMAIL: liste@clicclac.info
SECRET_KEY: 7dJuLowm7E5fuXtdEiG1aZ5XwpV8DMOePr5TxwETXTDvM9MgqYulzlb75OscYF4Yvto63jESprC02ZjegOKxDuQhz
volumes:
- yacht:/config
- /var/run/docker.sock:/var/run/docker.sock
image: selfhostedpro/yacht
volumes:
yacht:
# tokens changed
```

View File

@@ -0,0 +1,133 @@
# Grafana
```yaml
version: "3.9"
services:
grafana:
image: grafana/grafana:latest
container_name: Grafana
hostname: grafana
networks:
- grafana-net
mem_limit: 512m
cpu_shares: 512
security_opt:
- no-new-privileges:true
user: 1026:100
healthcheck:
test: wget --no-verbose --tries=1 --spider http://localhost:3000/api/health
ports:
- 3340:3000
volumes:
- /volume1/docker/grafana/data:/var/lib/grafana:rw
environment:
TZ: Europe/Paris
GF_INSTALL_PLUGINS: grafana-clock-panel,grafana-simple-json-datasource,natel-discrete-panel,grafana-piechart-panel
restart: on-failure:5
prometheus:
image: prom/prometheus
command:
- '--storage.tsdb.retention.time=60d'
- '--config.file=/etc/prometheus/prometheus.yml'
container_name: Prometheus
hostname: prometheus-server
networks:
- grafana-net
- prometheus-net
mem_limit: 1g
cpu_shares: 768
security_opt:
      - no-new-privileges:true
user: 1026:100
healthcheck:
test: wget --no-verbose --tries=1 --spider http://localhost:9090/ || exit 1
volumes:
- /volume1/docker/grafana/prometheus:/prometheus:rw
- /volume1/docker/grafana/prometheus.yml:/etc/prometheus/prometheus.yml:ro
restart: on-failure:5
node-exporter:
image: prom/node-exporter:latest
command:
- --collector.disable-defaults
- --collector.stat
- --collector.time
- --collector.cpu
- --collector.loadavg
- --collector.hwmon
- --collector.meminfo
- --collector.diskstats
container_name: Prometheus-Node
hostname: prometheus-node
networks:
- prometheus-net
mem_limit: 256m
mem_reservation: 64m
cpu_shares: 512
security_opt:
      - no-new-privileges:true
read_only: true
user: 1026:100
healthcheck:
test: wget --no-verbose --tries=1 --spider http://localhost:9100/
restart: on-failure:5
snmp-exporter:
image: prom/snmp-exporter:latest
command:
- '--config.file=/etc/snmp_exporter/snmp.yml'
container_name: Prometheus-SNMP
hostname: prometheus-snmp
networks:
- prometheus-net
mem_limit: 256m
mem_reservation: 64m
cpu_shares: 512
security_opt:
- no-new-privileges:true
read_only: true
user: 1026:100
healthcheck:
test: wget --no-verbose --tries=1 --spider http://localhost:9116/ || exit 1
volumes:
- /volume1/docker/grafana/snmp:/etc/snmp_exporter/:ro
restart: on-failure:5
cadvisor:
image: gcr.io/cadvisor/cadvisor:latest
command:
- '--docker_only=true'
container_name: Prometheus-cAdvisor
hostname: prometheus-cadvisor
networks:
- prometheus-net
mem_limit: 256m
mem_reservation: 64m
cpu_shares: 512
security_opt:
      - no-new-privileges:true
read_only: true
volumes:
- /:/rootfs:ro
- /var/run:/var/run:ro
- /sys:/sys:ro
- /var/run/docker.sock:/var/run/docker.sock:ro
restart: on-failure:5
networks:
grafana-net:
name: grafana-net
ipam:
config:
- subnet: 192.168.50.0/24
prometheus-net:
name: prometheus-net
ipam:
config:
- subnet: 192.168.51.0/24
```
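The stack mounts `/volume1/docker/grafana/prometheus.yml` but the file itself is not shown here; a minimal sketch of what it might contain, scraping the exporters above by their hostnames, could be written like this (jobs and intervals are assumptions to adapt):
```bash
# Write a minimal prometheus.yml to the path mounted by the stack above
cat > /volume1/docker/grafana/prometheus.yml <<'EOF'
global:
  scrape_interval: 30s
scrape_configs:
  - job_name: node
    static_configs:
      - targets: ['prometheus-node:9100']
  - job_name: cadvisor
    static_configs:
      - targets: ['prometheus-cadvisor:8080']
EOF
```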

View File

@@ -0,0 +1,104 @@
# Docker
### Installation
Install the Container Manager package in DSM
[DockerHub](https://hub.docker.com/search?q=)
##### Restart Container Manager
```bash
$ systemctl list-units --type=service --all | grep -i docker
pkg-ContainerManager-dockerd.service loaded active running Docker Application Container Engine
pkg-ContainerManager-event-watcherd.service loaded active running Docker event watch service
```
```bash
$ sudo systemctl restart pkg-ContainerManager-dockerd
```
### Usage
[Updating containers](updates.md) (Container Manager - Watchtower)
[Ports](ports.md)
```bash
ERROR: could not find an available, non-overlapping IPv4 address pool among the defaults to assign to the network
```
Add to `/etc/docker/daemon.json` (or create it):
```json
{
"default-address-pools" : [
{
"base" : "172.17.0.0/12",
"size" : 20
},
{
"base" : "192.168.0.0/16",
"size" : 24
}
]
}
```
Another option:
```yaml
networks:
default:
driver: bridge
ipam:
config:
- subnet: 172.16.57.0/24
```
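To pick a free subnet, it helps to list which ones the existing networks already use:
```bash
# Print every Docker network together with the subnet(s) it uses
docker network inspect --format '{{.Name}}: {{range .IPAM.Config}}{{.Subnet}} {{end}}' $(docker network ls -q)
```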
https://straz.to/2021-09-08-docker-address-pools/
https://jareklipski.medium.com/docker-can-only-create-31-default-networks-e7f98f778626
https://serverfault.com/questions/916941/configuring-docker-to-not-use-the-172-17-0-0-range
https://stackoverflow.com/questions/43720339/docker-error-could-not-find-an-available-non-overlapping-ipv4-address-pool-am
### Containers
- [iGotify](docker-compose/igotify.md)
- [Pi.Alert](docker-compose/Pi.Alert.md)
- [searXNG](docker-compose/searXNG.md)
- [WireGuard](Wireguard.md)
### Login
```bash
$ sudo docker login --username=foo
# WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
```
```bash
$ cat ~/my_password.txt | docker login --username foo --password-stdin
```
https://www.howtogeek.com/devops/how-to-login-to-docker-hub-and-private-registries-with-the-docker-cli/
### Backups
https://mariushosting.com/synology-how-to-back-up-docker-containers/

View File

@@ -0,0 +1,148 @@
# iperf3
### On the NAS:
Installing iperf3 on the NAS:
```bash
$ sudo docker run -it --rm -p 5201:5201 networkstatic/iperf3 --help
```
Start the server:
```bash
$ sudo docker run -it --rm --name=iperf3-server -p 5201:5201 networkstatic/iperf3 -s
-----------------------------------------------------------
Server listening on 5201
-----------------------------------------------------------
```
### On the Mac:
```bash
$ brew install iperf3
```
Run the client:
```bash
$ iperf3 -c 192.168.2.57
Connecting to host 192.168.2.57, port 5201
[ 5] local 192.168.2.240 port 60430 connected to 192.168.2.57 port 5201
[ ID] Interval Transfer Bitrate
[ 5] 0.00-1.00 sec 14.2 MBytes 119 Mbits/sec
[ 5] 1.00-2.00 sec 12.9 MBytes 108 Mbits/sec
[ 5] 2.00-3.01 sec 13.0 MBytes 109 Mbits/sec
[ 5] 3.01-4.01 sec 12.0 MBytes 101 Mbits/sec
[ 5] 4.01-5.00 sec 12.5 MBytes 105 Mbits/sec
[ 5] 5.00-6.00 sec 14.0 MBytes 118 Mbits/sec
[ 5] 6.00-7.00 sec 13.4 MBytes 112 Mbits/sec
[ 5] 7.00-8.00 sec 13.0 MBytes 109 Mbits/sec
[ 5] 8.00-9.00 sec 10.0 MBytes 83.7 Mbits/sec
[ 5] 9.00-10.01 sec 10.5 MBytes 88.1 Mbits/sec
- - - - - - - - - - - - - - - - - - - - - - - - -
[ ID] Interval Transfer Bitrate
[ 5] 0.00-10.01 sec 126 MBytes 105 Mbits/sec sender
[ 5] 0.00-10.09 sec 125 MBytes 104 Mbits/sec receiver
iperf Done.
```
### External test:
```bash
iperf3 -c scaleway.testdebit.info -p 9215 -R -P 4
Connecting to host scaleway.testdebit.info, port 9215
Reverse mode, remote host scaleway.testdebit.info is sending
[ 7] local 192.168.2.240 port 60728 connected to 62.210.156.7 port 9215
[ 9] local 192.168.2.240 port 60729 connected to 62.210.156.7 port 9215
[ 11] local 192.168.2.240 port 60730 connected to 62.210.156.7 port 9215
[ 13] local 192.168.2.240 port 60731 connected to 62.210.156.7 port 9215
[ ID] Interval Transfer Bitrate
[ 7] 0.00-1.00 sec 384 KBytes 3.14 Mbits/sec
[ 9] 0.00-1.00 sec 256 KBytes 2.09 Mbits/sec
[ 11] 0.00-1.00 sec 384 KBytes 3.14 Mbits/sec
[ 13] 0.00-1.00 sec 384 KBytes 3.14 Mbits/sec
[SUM] 0.00-1.00 sec 1.38 MBytes 11.5 Mbits/sec
- - - - - - - - - - - - - - - - - - - - - - - - -
[ 7] 1.00-2.00 sec 1.88 MBytes 15.8 Mbits/sec
[ 9] 1.00-2.00 sec 1.25 MBytes 10.5 Mbits/sec
[ 11] 1.00-2.00 sec 1.25 MBytes 10.5 Mbits/sec
[ 13] 1.00-2.00 sec 1.25 MBytes 10.5 Mbits/sec
[SUM] 1.00-2.00 sec 5.62 MBytes 47.3 Mbits/sec
- - - - - - - - - - - - - - - - - - - - - - - - -
[ 7] 2.00-3.00 sec 3.50 MBytes 29.3 Mbits/sec
[ 9] 2.00-3.00 sec 1.88 MBytes 15.7 Mbits/sec
[ 11] 2.00-3.00 sec 2.75 MBytes 23.0 Mbits/sec
[ 13] 2.00-3.00 sec 2.00 MBytes 16.8 Mbits/sec
[SUM] 2.00-3.00 sec 10.1 MBytes 84.8 Mbits/sec
- - - - - - - - - - - - - - - - - - - - - - - - -
[ 7] 3.00-4.00 sec 3.12 MBytes 26.2 Mbits/sec
[ 9] 3.00-4.00 sec 1.62 MBytes 13.6 Mbits/sec
[ 11] 3.00-4.00 sec 2.62 MBytes 22.0 Mbits/sec
[ 13] 3.00-4.00 sec 1.75 MBytes 14.7 Mbits/sec
[SUM] 3.00-4.00 sec 9.12 MBytes 76.5 Mbits/sec
- - - - - - - - - - - - - - - - - - - - - - - - -
[ 7] 4.00-5.00 sec 5.25 MBytes 43.9 Mbits/sec
[ 9] 4.00-5.01 sec 3.00 MBytes 25.1 Mbits/sec
[ 11] 4.00-5.01 sec 4.38 MBytes 36.6 Mbits/sec
[ 13] 4.00-5.01 sec 2.88 MBytes 24.1 Mbits/sec
[SUM] 4.00-5.00 sec 15.5 MBytes 130 Mbits/sec
- - - - - - - - - - - - - - - - - - - - - - - - -
[ 7] 5.00-6.00 sec 5.62 MBytes 47.4 Mbits/sec
[ 9] 5.01-6.00 sec 2.88 MBytes 24.2 Mbits/sec
[ 11] 5.01-6.00 sec 4.25 MBytes 35.8 Mbits/sec
[ 13] 5.01-6.00 sec 3.25 MBytes 27.4 Mbits/sec
[SUM] 5.00-6.00 sec 16.0 MBytes 135 Mbits/sec
- - - - - - - - - - - - - - - - - - - - - - - - -
[ 7] 6.00-7.00 sec 6.12 MBytes 51.3 Mbits/sec
[ 9] 6.00-7.00 sec 3.38 MBytes 28.3 Mbits/sec
[ 11] 6.00-7.00 sec 4.62 MBytes 38.7 Mbits/sec
[ 13] 6.00-7.00 sec 3.75 MBytes 31.4 Mbits/sec
[SUM] 6.00-7.00 sec 17.9 MBytes 150 Mbits/sec
- - - - - - - - - - - - - - - - - - - - - - - - -
[ 7] 7.00-8.00 sec 5.62 MBytes 47.3 Mbits/sec
[ 9] 7.00-8.00 sec 3.00 MBytes 25.2 Mbits/sec
[ 11] 7.00-8.00 sec 4.25 MBytes 35.7 Mbits/sec
[ 13] 7.00-8.00 sec 3.75 MBytes 31.5 Mbits/sec
[SUM] 7.00-8.00 sec 16.6 MBytes 140 Mbits/sec
- - - - - - - - - - - - - - - - - - - - - - - - -
[ 7] 8.00-9.00 sec 4.75 MBytes 39.8 Mbits/sec
[ 9] 8.00-9.00 sec 2.50 MBytes 20.9 Mbits/sec
[ 11] 8.00-9.00 sec 3.75 MBytes 31.4 Mbits/sec
[ 13] 8.00-9.00 sec 3.25 MBytes 27.2 Mbits/sec
[SUM] 8.00-9.00 sec 14.2 MBytes 119 Mbits/sec
- - - - - - - - - - - - - - - - - - - - - - - - -
[ 7] 9.00-10.00 sec 4.88 MBytes 40.8 Mbits/sec
[ 9] 9.00-10.00 sec 2.75 MBytes 23.0 Mbits/sec
[ 11] 9.00-10.00 sec 4.12 MBytes 34.6 Mbits/sec
[ 13] 9.00-10.00 sec 3.88 MBytes 32.5 Mbits/sec
[SUM] 9.00-10.00 sec 15.6 MBytes 131 Mbits/sec
- - - - - - - - - - - - - - - - - - - - - - - - -
[ ID] Interval Transfer Bitrate Retr
[ 7] 0.00-10.05 sec 46.7 MBytes 39.0 Mbits/sec 0 sender
[ 7] 0.00-10.00 sec 41.1 MBytes 34.5 Mbits/sec receiver
[ 9] 0.00-10.05 sec 24.9 MBytes 20.8 Mbits/sec 0 sender
[ 9] 0.00-10.00 sec 22.5 MBytes 18.9 Mbits/sec receiver
[ 11] 0.00-10.05 sec 36.0 MBytes 30.1 Mbits/sec 0 sender
[ 11] 0.00-10.00 sec 32.4 MBytes 27.1 Mbits/sec receiver
[ 13] 0.00-10.05 sec 30.0 MBytes 25.1 Mbits/sec 0 sender
[ 13] 0.00-10.00 sec 26.1 MBytes 21.9 Mbits/sec receiver
[SUM] 0.00-10.05 sec 138 MBytes 115 Mbits/sec 0 sender
[SUM] 0.00-10.00 sec 122 MBytes 102 Mbits/sec receiver
iperf Done.
```
https://aradaff.com/tester-son-reseau/
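For a UDP test against the same NAS server (useful to see jitter and packet loss rather than raw TCP throughput), the client can be run like this:
```bash
# UDP test at 100 Mbit/s against the iperf3 server on the NAS
iperf3 -c 192.168.2.57 -u -b 100M
```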

View File

@@ -0,0 +1,34 @@
# Portainer
| Created with | docker-run |
| ----------- | ----------- |
| Updated by | manual |
| Port | 9000 |
| Links | [Github](https://github.com/portainer/portainer) |
### Installation
https://mariushosting.com/synology-30-second-portainer-install-using-task-scheduler-docker/
### Updating the Portainer container
https://mariushosting.com/synology-how-to-update-portainer/
In Container Manager:
- Image tab -> Update available -> Update
- Container tab -> Action -> Start
### Backup configuration
Portainer -> Settings -> Back up Portainer -> Download backup

View File

@@ -2,21 +2,51 @@
| Service | Ports | Type |
| ------------------------------------------------------------ | --------------------------- | ------ |
| mymediaforalexa | 52050 - 52051 | Docker |
| homebridge | Bridge: 51534<br />UI: 8581 | Docker |
| Hoobs | Bridge: 51826<br />UI: 8181 | Docker |
| [PiHole](https://mariushosting.com/how-to-install-pi-hole-on-your-synology-nas/) | 8090 | Docker |
| Lychee | 90 | |
| | | |
| | | |
| | | |
| | | |
| Container | Ports | Gateway | IP address | External access |
| ------------------------------------------------------------ | --------------- | ----------- | ------------ | ------------------ |
| | | | | |
| portainer | **8000** / 9000 | 172.17.0.1 | 172.17.0.3 | |
| geoipupdate | | 172.19.0.1 | 172.19.0.2 | |
| [PiHole](https://mariushosting.com/how-to-install-pi-hole-on-your-synology-nas/) | 8090 | 192.168.2.1 | 192.168.2.68 | 192.168.2.68 |
| freegeoip | **8080** / 8888 | 172.18.0.1 | 172.18.0.2 | |
| Acme | | 172.17.0.1 | 172.17.0.2 | |
| | | | | |
| wireguard | | 172.20.0.1 | 172.20.0.2 | 192.168.2.68:51820 |
| | | | | |
#### Restart Container Manager:
```bash
systemctl restart pkg-ContainerManager-dockerd
```
#### Firewall:
- IP: 172.16.0.0
- Mask: 255.248.0.0 (/13)
This covers 172.16.0.0 through 172.23.255.255.
https://cric.grenoble.cnrs.fr/Administrateurs/Outils/CalculMasque/
https://www.it-connect.fr/adresses-ipv4-et-le-calcul-des-masques-de-sous-reseaux/#VII_Comment_trouver_le_bon_masque_pour_un_nombre_dhotes_specifique
#### Links:
Create a restricted user for Docker:
https://drfrankenstein.co.uk/step-2-setting-up-a-restricted-docker-user-and-obtaining-ids/
https://mariushosting.com/synology-how-to-update-docker-image/
https://www.timmertech.io/manage-docker-without-sudo-on-synology/
https://stackoverflow.com/questions/43720339/docker-error-could-not-find-an-available-non-overlapping-ipv4-address-pool-am

View File

@@ -0,0 +1,116 @@
# Updating containers
### Container manager
##### Image tab:
Notifications are shown for images whose:
- source is hub.docker.com
- tag is Latest or Nightly
Click 'Update available': the image is downloaded and the container is updated.
##### Project tab:
If the project contains several containers:
- select the project -> Action -> Stop
- update the images
- select the project -> Action -> Create
### Watchtower
| Created with | Portainer |
| ----------- | ----------- |
| Updated by | Watchtower |
| Ports | 8000/9000 |
https://drfrankenstein.co.uk/watchtower-automated-updates-in-container-manager-on-a-synology-nas/
```yaml
version: '2.1'
services:
watchtower:
image: containrrr/watchtower
container_name: watchtower
hostname: watchtower-nas
network_mode: bridge
environment:
- WATCHTOWER_NOTIFICATIONS=email
- WATCHTOWER_CLEANUP=true
- WATCHTOWER_DEBUG=true
- WATCHTOWER_LABEL_ENABLE=true
- WATCHTOWER_TIMEOUT=30s
- WATCHTOWER_POLL_INTERVAL=300
- TZ=Europe/Paris
env_file:
- stack.env
labels:
- "com.centurylinklabs.watchtower.enable=true"
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- /volume1/docker/watchtower/config.json:/root/.docker/config.json
restart: unless-stopped
```
##### Update only the specified containers:
Add to the Watchtower compose file:
```yaml
environment:
- WATCHTOWER_LABEL_ENABLE=true
```
Add to the containers you want to update:
```yaml
labels:
- "com.centurylinklabs.watchtower.enable=true"
```
##### Only monitor certain containers:
Add to the containers you only want to monitor:
```yaml
labels:
- "com.centurylinklabs.watchtower.monitor-only=true"
```
##### Exclude certain containers from monitoring and updates:
Add to the Watchtower compose file (the listed containers will be ignored):
```yaml
environment:
- WATCHTOWER_DISABLE_CONTAINERS=container1,container2
```
https://www.smarthomebeginner.com/watchtower-docker-compose-2024/
#### Run Watchtower manually:
```bash
$ docker run -v /var/run/docker.sock:/var/run/docker.sock containrrr/watchtower --run-once
```
#### Notifications:
https://containrrr.dev/watchtower/notifications/
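`WATCHTOWER_NOTIFICATIONS=email` needs SMTP details to actually send anything; a rough one-off test with placeholder SMTP values (variable names taken from the Watchtower notification docs) could look like this:
```bash
# One-off Watchtower run that sends its report by e-mail (placeholder SMTP values)
docker run --rm \
  -v /var/run/docker.sock:/var/run/docker.sock \
  -e WATCHTOWER_NOTIFICATIONS=email \
  -e WATCHTOWER_NOTIFICATION_EMAIL_FROM=nas@example.com \
  -e WATCHTOWER_NOTIFICATION_EMAIL_TO=admin@example.com \
  -e WATCHTOWER_NOTIFICATION_EMAIL_SERVER=smtp.example.com \
  -e WATCHTOWER_NOTIFICATION_EMAIL_SERVER_PORT=587 \
  -e WATCHTOWER_NOTIFICATION_EMAIL_SERVER_USER=nas@example.com \
  -e WATCHTOWER_NOTIFICATION_EMAIL_SERVER_PASSWORD=changeme \
  containrrr/watchtower --run-once
```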

View File

@@ -0,0 +1,215 @@
# Certificates
All certificates are stored in `/usr/syno/etc/certificate/_archive`, each in its own folder (here cjN5Vb, Ks8Ngt and mqkgNA):
```bash
root@DS923:/usr/syno/etc/certificate/_archive# ls -la
total 72
drwx------ 5 root root 4096 Dec 25 15:50 .
drwxr-xr-x 9 root root 4096 Jan 14 2024 ..
drwx------ 2 root root 4096 Jan 13 2024 cjN5Vb
-rw------- 1 root root 7 Dec 8 01:49 DEFAULT
-rw------- 1 root root 23093 Dec 25 15:50 INFO
drwx------ 2 root root 4096 Nov 14 08:57 Ks8Ngt
drwx------ 2 root root 4096 Dec 8 01:49 mqkgNA
-rwx------ 1 root root 18832 Dec 25 15:48 SERVICES
-rw-r--r-- 1 root root 41 Jan 10 2024 .syno-ca-cert.srl
```
#### DEFAULT points to the default certificate:
```bash
cat DEFAULT
mqkgNA
```
```bash
root@DS923:/usr/syno/etc/certificate/_archive/mqkgNA# ls -la
total 24
drwx------ 2 root root 4096 Dec 8 01:49 .
drwx------ 5 root root 4096 Dec 25 15:50 ..
-r-------- 1 root root 2139 Dec 8 01:49 cert.pem
-r-------- 1 root root 1801 Dec 8 01:49 chain.pem
-r-------- 1 root root 3940 Dec 8 01:49 fullchain.pem
-r-------- 1 root root 3272 Dec 8 01:49 privkey.pem
```
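To check which certificate is currently the default and when it expires, the DEFAULT pointer can be combined with openssl:
```bash
# Show the subject and expiry date of the default certificate
ARCHIVE=/usr/syno/etc/certificate/_archive
sudo openssl x509 -in "$ARCHIVE/$(sudo cat $ARCHIVE/DEFAULT)/cert.pem" -noout -subject -enddate
```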
#### INFO lists every certificate and the services that use it:
**Ks8Ngt** is used by Synology DSM
```json
"Ks8Ngt" : {
"desc" : "",
"services" : [
{
"display_name" : "KMIP",
"display_name_i18n" : "remote_key:kmip_tab_title",
"isPkg" : false,
"owner" : "root",
"service" : "kmip",
"subscriber" : "kmip"
},
{
"display_name" : "Hyper Backup Vault",
"display_name_i18n" : "HyperBackupVault:app:package_name",
"isPkg" : true,
"owner" : "root",
"service" : "HyperBackupVault",
"subscriber" : "HyperBackupVault"
},
{
"display_name" : "Replication Service",
"display_name_i18n" : "app:displayname",
"isPkg" : true,
"owner" : "root",
"service" : "snapshot_receiver",
"subscriber" : "ReplicationService"
}
]
},
```
**cjN5Vb** is used by Active Backup for Business
```json
"cjN5Vb" : {
"desc" : "Certificate created by Active Backup for Business",
"services" : [
{
"display_name" : "Active Backup",
"display_name_i18n" : "SYNO.ActiveBackup.AppInstance:app:package_name",
"isPkg" : true,
"owner" : "ActiveBackup",
"service" : "ActiveBackup",
"subscriber" : "ActiveBackup"
}
],
"user_deletable" : true
},
```
**mqkgNA** is the Let's Encrypt certificate
```json
"mqkgNA" : {
"desc" : "Let's Encrypt",
"services" : [
{
"display_name" : "Synology Drive Server",
"display_name_i18n" : "SYNO.SDS.Drive.Application:app:pkg_name",
"isPkg" : true,
"owner" : "SynologyDrive",
"service" : "SynologyDrive",
"subscriber" : "SynologyDrive"
},
{
"display_name" : "ds923.photos-nas.ovh",
"isPkg" : false,
"multiple_cert" : true,
"owner" : "root",
"service" : "FQDN",
"subscriber" : "system",
"user_setable" : true
},
{
"display_name" : "photos-nas.ovh:443",
"isPkg" : true,
"multiple_cert" : true,
"owner" : "root",
"service" : "cbd90216-eab3-48ad-b957-fd11af0d558d",
"subscriber" : "WebStation"
},
{
"display_name" : "gitea.photos-nas.ovh",
"isPkg" : false,
"multiple_cert" : true,
"owner" : "root",
"service" : "12c8d3ad-54aa-4782-b929-a8be2fe56a67",
"subscriber" : "ReverseProxy",
"user_setable" : true
},
{
"display_name" : "dockge.photos-nas.ovh",
"isPkg" : false,
"multiple_cert" : true,
"owner" : "root",
"service" : "13a1de2b-3659-44a6-9169-413b6f669684",
"subscriber" : "ReverseProxy",
"user_setable" : true
},
],
"user_deletable" : true
}
}
```
#### SERVICES lists the DSM services and the reverse proxies:
```json
[
{
"display_name" : "KMIP",
"display_name_i18n" : "remote_key:kmip_tab_title",
"isPkg" : false,
"owner" : "root",
"service" : "kmip",
"subscriber" : "kmip"
},
{
"display_name" : "FTPS",
"isPkg" : false,
"owner" : "root",
"service" : "ftpd",
"subscriber" : "smbftpd"
},
{
"display_name" : "DSM Desktop Service",
"display_name_i18n" : "common:web_desktop",
"isPkg" : false,
"multiple_cert" : true,
"owner" : "root",
"service" : "default",
"subscriber" : "system",
"user_setable" : true
},
{
"display_name" : "home-assistant.photos-nas.ovh",
"isPkg" : false,
"multiple_cert" : true,
"owner" : "root",
"service" : "0aa5255b-fdb5-436b-9a7c-6cc5ad609c6e",
"subscriber" : "ReverseProxy",
"user_setable" : true
},
{
"display_name" : "gitea.photos-nas.ovh",
"isPkg" : false,
"multiple_cert" : true,
"owner" : "root",
"service" : "12c8d3ad-54aa-4782-b929-a8be2fe56a67",
"subscriber" : "ReverseProxy",
"user_setable" : true
},
{
"display_name" : "nmap.photos-nas.ovh",
"isPkg" : false,
"multiple_cert" : true,
"owner" : "root",
"service" : "f1a8b6b3-ec6e-45d7-a113-eb1088efa9b7",
"subscriber" : "ReverseProxy",
"user_setable" : true
}
]
```

View File

@@ -0,0 +1,62 @@
# HomeAssistant
### SynoCommunity package
Core version
[FAQ](https://github.com/SynoCommunity/spksrc/wiki/FAQ-HomeAssistant/)
#### Edit the configuration
```bash
sudo nano /var/packages/homeassistant/var/config/configuration.yaml
```
#### View the logs
```bash
tail -f /var/packages/homeassistant/var/homeassistant.log
```
### VMM
#### '400 Bad Request' error
##### /homeassistant/configuration.yaml
```yaml
# Loads default set of integrations. Do not remove.
default_config:
# Load frontend themes from the themes folder
frontend:
themes: !include_dir_merge_named themes
automation: !include automations.yaml
script: !include scripts.yaml
scene: !include scenes.yaml
homeassistant:
  external_url: "https://home-assistant.photos-nas.ovh" # do not include the port
  internal_url: "http://192.168.2.21:8123" # local HA address, including the port
http:
use_x_forwarded_for: true
trusted_proxies:
- 192.168.2.57 # IP of Synology
ip_ban_enabled: false
```
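With the reverse proxy and `trusted_proxies` in place, the external URL should answer without the 400 error; a quick check from any machine:
```bash
# The headers should come back from Home Assistant instead of a 400 Bad Request
curl -sI https://home-assistant.photos-nas.ovh | head -n 5
```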
On the NAS

Some files were not shown because too many files have changed in this diff.