Compare commits
57 Commits
06a955e042
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
| dbe94e9375 | |||
| f0e16eced3 | |||
| cc7eee152f | |||
| d32796c481 | |||
|
|
ab9b10acd9 | ||
| 9a64b7e9af | |||
| 99c38cd4c7 | |||
| a37a5adae3 | |||
| 25abcbb9e4 | |||
| ed53d4cb1f | |||
| f8034e007e | |||
| f37d7bec80 | |||
| 67eb731e2a | |||
| 0915163d34 | |||
|
|
703f2e1d0c | ||
| 1d39da2723 | |||
| ef43428179 | |||
| 92dd8d3c24 | |||
| 34f4f01fe5 | |||
| 3cdb8e01fa | |||
| 8e6d9de9e5 | |||
| caee82e970 | |||
| 0174410211 | |||
| 9c71b23101 | |||
| 3e75644b75 | |||
| 0ba38f645e | |||
| 4a0cccc404 | |||
| 2d3b3f2c2e | |||
| e9d366df33 | |||
| 592bc3f0f0 | |||
| 1064a4ed07 | |||
|
|
8007388d0f | ||
|
|
763fe5070f | ||
|
|
f2daaef425 | ||
|
|
859a750be7 | ||
|
|
56142b79c8 | ||
|
|
c61dca41ea | ||
|
|
c0effc37e4 | ||
|
|
0b22289e56 | ||
|
|
c20d21e222 | ||
|
|
8573a13199 | ||
|
|
ebd8f77b86 | ||
|
|
b533d13329 | ||
|
|
13544d2ae4 | ||
|
|
a6d6c8278c | ||
|
|
82f95faee0 | ||
|
|
cfe19ec9f2 | ||
|
|
3fb9f87cb0 | ||
|
|
f83faafe95 | ||
|
|
d050afd31f | ||
|
|
4328116819 | ||
| c94b135ff6 | |||
| c1e242816b | |||
| 486c973f18 | |||
| 85327d81cd | |||
|
|
943ae99ef0 | ||
|
|
ba683a9cf9 |
@@ -1,29 +0,0 @@
|
|||||||
#!/bin/bash
# This script creates a tar.gz backup of the /opt/AdGuardHome directory, cron jobs, and system logs.
# The backup is stored in the './backups/' directory.
#
# To automate backups, you can add this script to your crontab.
# For example, to run this backup on the 1st of every month, add the following line:
#   0 0 1 * * /path/to/this/script.sh
# (Remember to replace '/path/to/this/script.sh' with the actual path to this script)
# You can edit your crontab with the command: crontab -e

# Fail on errors, unset variables and broken pipelines.
set -euo pipefail

# Get the current date in a suitable format for the filename
current_date=$(date +"%Y-%m-%d")

# Create the backup filename
backup_filename="adguardhome_backup_${current_date}.tar.gz"

# Default backup location
backup_location="./backups/"

# Create the 'adguardhome' directory if it doesn't exist
mkdir -p "$backup_location/adguardhome"

# Create the tar.gz archive. The crontabs directory is archived as a whole
# (the original globbed its contents, which errors out when it is empty).
tar -czvf "$backup_location/adguardhome/$backup_filename" /opt/AdGuardHome /var/spool/cron/crontabs /var/log

# Print a success message
echo "Backup created successfully in $backup_location/adguardhome/$backup_filename"

# Delete backups older than 90 days — but only archives this script created,
# so unrelated files placed in the directory are never removed.
find "$backup_location/adguardhome" -name 'adguardhome_backup_*.tar.gz' -mtime +90 -type f -delete
@@ -1,140 +0,0 @@
|
|||||||
#!/bin/bash

# This script performs an incremental backup of the specified source directory,
# generates CRC32 checksums for only the files that are backed up or modified,
# and manages the deletion of old backups. The CRC32 checksum files are stored
# in a dedicated subdirectory within the backup directory. You can also run the
# script with the 'verification' argument to check the integrity of only the files
# processed in the latest backup.
#
# NOTE: This script is not tested and is intended for debugging purposes. Use with caution.

# Configuration
SOURCE_DIR="/chemin/vers/votre/dossier_source"  # Replace with the path to your source directory
BACKUP_DIR="/chemin/vers/votre/dossier_backup"  # Replace with the path to your backup directory
LOG_FILE="/chemin/vers/votre/fichier_log"       # Replace with the path to your log file
RETENTION_PERIOD=60  # Number of days to retain deleted/modified files (2 months)
EMAIL="votre_email@example.com"  # Replace with your email address
CRC_RETENTION_PERIOD=60  # Number of days to retain CRC checksum files

# Directory for CRC32 checksums within the backup directory.
# NOTE(review): creating it here also implicitly creates $BACKUP_DIR, so the
# "backup directory does not exist" branch further down can never trigger —
# confirm whether that branch is still wanted.
CRC_DIR="$BACKUP_DIR/.crc_checksums"
# Creation failure was previously silent; everything below depends on this dir.
mkdir -p "$CRC_DIR" || { echo "ERREUR : Unable to create checksum directory '$CRC_DIR'." >&2; exit 1; }

# Files for CRC32 checksums
SOURCE_CRC_FILE="$CRC_DIR/source_crc_checksums.txt"
BACKUP_CRC_FILE="$CRC_DIR/backup_crc_checksums.txt"

# Temporary file for CRC32 checksums of the current backup
CURRENT_BACKUP_CRC_FILE="$CRC_DIR/current_backup_crc_checksums.txt"
# Notify the administrator by email that the backup failed.
# Reads the EMAIL and LOG_FILE globals; delivery relies on a working
# local 'mail' command.
send_error_email() {
    SUBJECT="Backup Error on $(hostname)"
    MESSAGE="An error occurred during the backup on $(hostname) at $(date +'%Y-%m-%d %H:%M:%S'). Please check the log file at $LOG_FILE for details."
    printf '%s\n' "$MESSAGE" | mail -s "$SUBJECT" "$EMAIL"
}
# Append a timestamped entry to $LOG_FILE. Messages that start with
# "ERREUR" additionally trigger an error email via send_error_email.
log_action() {
    local stamp
    stamp=$(date +'%Y-%m-%d %H:%M:%S')
    printf '%s - %s\n' "$stamp" "$1" >> "$LOG_FILE"
    case "$1" in
        ERREUR*) send_error_email ;;
    esac
}
# Check if the source and backup directories exist. A missing source is
# fatal; a missing backup directory is created on demand.
if [ ! -d "$SOURCE_DIR" ]; then
    log_action "ERREUR : The source directory '$SOURCE_DIR' does not exist."
    exit 1
fi

if [ ! -d "$BACKUP_DIR" ]; then
    log_action "The backup directory '$BACKUP_DIR' does not exist. Creating directory."
    # Test the command directly instead of the '[ $? -ne 0 ]' anti-pattern.
    if ! mkdir -p "$BACKUP_DIR"; then
        log_action "ERREUR : Unable to create the backup directory '$BACKUP_DIR'."
        exit 1
    fi
fi
# Generate CRC32 checksums for the files processed in the current backup.
# Paths are recorded RELATIVE to their respective roots and sorted, so the
# source list and the backup list are directly comparable by verify_integrity.
# (The original recorded absolute paths in unspecified find order, so the
# two lists could never be equal and every verification failed.)
generate_crc_checksums() {
    # Checksums of the source tree, relative to $SOURCE_DIR.
    if ! (cd "$SOURCE_DIR" && find . -type f -exec crc32 {} \; | sort) > "$SOURCE_CRC_FILE"; then
        log_action "ERREUR : Unable to generate CRC32 checksums for the source directory."
        exit 1
    fi

    # Checksums of the backup tree, relative to $BACKUP_DIR; the checksum
    # directory itself is excluded.
    # NOTE(review): files under deleted/ (rsync --backup-dir) still appear in
    # this list and will show up as differences — confirm whether they should
    # be excluded here as well.
    if ! (cd "$BACKUP_DIR" && find . -type f ! -path "./.crc_checksums/*" -exec crc32 {} \; | sort) > "$CURRENT_BACKUP_CRC_FILE"; then
        log_action "ERREUR : Unable to generate CRC32 checksums for the backup directory."
        exit 1
    fi
}
# Verify the integrity of the files processed in the latest backup by
# diffing the source checksum list against the backup checksum list.
# Exits 1 (after printing the differences) when they do not match.
verify_integrity() {
    if [ ! -f "$SOURCE_CRC_FILE" ] || [ ! -f "$CURRENT_BACKUP_CRC_FILE" ]; then
        log_action "ERREUR : Checksum files do not exist. Run the script without arguments to generate them."
        exit 1
    fi

    # Use an unpredictable temporary file instead of the fixed
    # /tmp/diff_output.txt (fixed names are vulnerable to symlink attacks
    # and collide between concurrent runs), and remove it when done
    # (the original leaked it).
    local diff_output
    diff_output=$(mktemp) || { log_action "ERREUR : mktemp failed."; exit 1; }

    # Compare the checksums and store differences
    if diff "$SOURCE_CRC_FILE" "$CURRENT_BACKUP_CRC_FILE" > "$diff_output"; then
        echo "La vérification de l'intégrité des fichiers a terminé avec succès."
        log_action "Backup integrity has been successfully verified."
        rm -f "$diff_output"
    else
        log_action "ERREUR : Backup integrity check failed. Differences found between source and backup checksums."
        # Print the differing entries with their file paths.
        while IFS= read -r line; do
            echo "$line"
        done < "$diff_output"
        rm -f "$diff_output"
        exit 1
    fi
}
# Remove CRC32 checksum files older than $CRC_RETENTION_PERIOD days
# from $CRC_DIR; a failing find is fatal.
cleanup_old_crc_files() {
    if ! find "$CRC_DIR" -type f -mtime +$CRC_RETENTION_PERIOD -exec rm -f {} \; ; then
        log_action "ERREUR : Unable to delete old CRC32 checksum files."
        exit 1
    fi
}
# If the 'verification' argument is provided, only perform the verification
if [ "$1" == "verification" ]; then
    verify_integrity
    exit 0
fi

# Incremental backup with error handling.
# Deleted files are moved to a dated subdirectory. The checksum directory is
# excluded from the transfer AND thereby protected from --delete. rsync
# matches exclude patterns relative to the transfer root, so the original
# absolute "--exclude $CRC_DIR/" never matched anything and --delete could
# remove the checksum directory; the pattern must be root-relative. The
# deleted/ backup area is protected for the same reason.
if ! rsync -av --delete --backup --backup-dir="$BACKUP_DIR/deleted/$(date +'%Y-%m-%d')" --exclude "/.crc_checksums/" --exclude "/deleted/" "$SOURCE_DIR/" "$BACKUP_DIR/"; then
    log_action "ERREUR : The incremental backup failed."
    exit 1
fi

# Generate CRC32 checksums for the files processed in the latest backup
generate_crc_checksums

# Verify the integrity of the files processed in the latest backup
verify_integrity

# Delete old backups of deleted/modified files. -depth processes children
# before parents so rm -rf never races find's own traversal (the original
# failed with "No such file or directory" whenever a directory matched),
# and the find is skipped entirely on the first run when deleted/ does not
# exist yet.
if [ -d "$BACKUP_DIR/deleted" ]; then
    if ! find "$BACKUP_DIR/deleted" -depth -type d -mtime +$RETENTION_PERIOD -exec rm -rf {} \; ; then
        log_action "ERREUR : Unable to delete old backups."
        exit 1
    fi
fi

# Clean up old CRC32 checksum files
cleanup_old_crc_files

log_action "Incremental backup completed successfully."
133
backup_and_restore/sauvegarde_docker/README.md
Normal file
133
backup_and_restore/sauvegarde_docker/README.md
Normal file
@@ -0,0 +1,133 @@
# Sauvegarde Docker

Système de sauvegarde automatique pour services Docker avec gestion intelligente des conteneurs.
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
Ce projet contient deux scripts Bash pour la sauvegarde et la vérification des services Docker :
|
||||||
|
|
||||||
|
- **`sauvegarde_docker.sh`** : Script principal de sauvegarde automatique
|
||||||
|
- **`verification_sauvegarde_docker.sh`** : Script de vérification de l'intégrité des sauvegardes
|
||||||
|
|
||||||
|
## Fonctionnalités
|
||||||
|
|
||||||
|
### Script de sauvegarde (`sauvegarde_docker.sh`)
|
||||||
|
|
||||||
|
- **Sauvegarde intelligente** : Détecte automatiquement les conteneurs Docker actifs
|
||||||
|
- **Arrêt/redémarrage sécurisé** : Arrête temporairement les conteneurs pendant la sauvegarde pour garantir la cohérence
|
||||||
|
- **Archivage compressé** : Création d'archives `.tar.gz` avec horodatage
|
||||||
|
- **Vérification d'intégrité** : Génération automatique de checksums SHA-256
|
||||||
|
- **Rétention automatique** : Suppression des sauvegardes de plus de 7 jours
|
||||||
|
- **Journalisation complète** : Logs détaillés avec horodatage
|
||||||
|
- **Gestion d'erreurs** : Statistiques et codes de retour appropriés
|
||||||
|
|
||||||
|
### Script de vérification (`verification_sauvegarde_docker.sh`)
|
||||||
|
|
||||||
|
- **Vérification d'intégrité** : Contrôle des checksums SHA-256
|
||||||
|
- **Rapport visuel** : Statut clair avec symboles (✓, ✗, ⚠)
|
||||||
|
- **Détection des anomalies** : Identification des fichiers corrompus ou manquants
|
||||||
|
|
||||||
|
## Configuration
|
||||||
|
|
||||||
|
### Paramètres par défaut
|
||||||
|
|
||||||
|
```bash
|
||||||
|
SRV_DIR="/home/docker/srv" # Répertoire source des services
|
||||||
|
BACKUP_DEST="/home/docker/backup" # Répertoire de destination
|
||||||
|
RETENTION_DAYS=7 # Durée de rétention en jours
|
||||||
|
LOG_FILE="/home/docker/docker-backup.log" # Fichier de log
|
||||||
|
```
|
||||||
|
|
||||||
|
### Prérequis
|
||||||
|
|
||||||
|
- **Docker** : Requis pour la gestion des conteneurs (optionnel)
|
||||||
|
- **Bash** : Version 4.0 ou supérieure
|
||||||
|
- **Espace disque** : Au moins 1 GB disponible (vérification automatique)
|
||||||
|
- **Permissions** : Accès en écriture aux répertoires de sauvegarde
|
||||||
|
|
||||||
|
## Installation
|
||||||
|
|
||||||
|
1. Cloner ou télécharger les scripts dans un répertoire
|
||||||
|
2. Rendre les scripts exécutables :
|
||||||
|
```bash
|
||||||
|
chmod +x sauvegarde_docker.sh
|
||||||
|
chmod +x verification_sauvegarde_docker.sh
|
||||||
|
```
|
||||||
|
3. Adapter les chemins dans la configuration si nécessaire
|
||||||
|
|
||||||
|
## Utilisation
|
||||||
|
|
||||||
|
### Sauvegarde manuelle
|
||||||
|
|
||||||
|
```bash
|
||||||
|
./sauvegarde_docker.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
### Vérification des sauvegardes
|
||||||
|
|
||||||
|
```bash
|
||||||
|
./verification_sauvegarde_docker.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
### Automatisation avec cron
|
||||||
|
|
||||||
|
Exemple pour une sauvegarde quotidienne à 2h00 :
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Éditer la crontab
|
||||||
|
crontab -e
|
||||||
|
|
||||||
|
# Ajouter la ligne suivante
|
||||||
|
0 2 * * * /chemin/vers/sauvegarde_docker.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
## Structure des sauvegardes
|
||||||
|
|
||||||
|
```
|
||||||
|
/home/docker/backup/
|
||||||
|
├── service1/
|
||||||
|
│ ├── service1_2025-01-15_02-00-01.tar.gz
|
||||||
|
│ ├── service1_2025-01-15_02-00-01.tar.gz.sha256
|
||||||
|
│ └── service1_2025-01-16_02-00-01.tar.gz
|
||||||
|
├── service2/
|
||||||
|
│ └── service2_2025-01-16_02-00-01.tar.gz
|
||||||
|
└── docker-backup.log
|
||||||
|
```
|
||||||
|
|
||||||
|
## Logs et monitoring
|
||||||
|
|
||||||
|
### Fichier de log
|
||||||
|
|
||||||
|
Le fichier `/home/docker/docker-backup.log` contient :
|
||||||
|
- Horodatage de chaque opération
|
||||||
|
- Statut des conteneurs Docker
|
||||||
|
- Taille des archives créées
|
||||||
|
- Erreurs et avertissements
|
||||||
|
- Statistiques de fin d'exécution
|
||||||
|
|
||||||
|
### Codes de retour
|
||||||
|
|
||||||
|
- **0** : Sauvegarde réussie sans erreur
|
||||||
|
- **1** : Erreurs détectées (vérifier les logs)
|
||||||
|
|
||||||
|
## Sécurité et bonnes pratiques
|
||||||
|
|
||||||
|
- Les conteneurs sont arrêtés proprement avant sauvegarde
|
||||||
|
- Attente de 5 secondes après arrêt pour garantir la cohérence
|
||||||
|
- Vérification automatique des checksums
|
||||||
|
- Gestion des erreurs avec tentatives de récupération
|
||||||
|
- Logs détaillés pour audit et débogage
|
||||||
|
|
||||||
|
## Dépannage
|
||||||
|
|
||||||
|
### Problèmes courants
|
||||||
|
|
||||||
|
1. **Docker non accessible** : Vérifier l'installation et les permissions
|
||||||
|
2. **Espace disque insuffisant** : Libérer de l'espace ou ajuster la rétention
|
||||||
|
3. **Conteneur ne redémarre pas** : Vérifier les logs Docker et la configuration
|
||||||
|
|
||||||
|
### Vérification des logs
|
||||||
|
|
||||||
|
```bash
|
||||||
|
tail -f /home/docker/docker-backup.log
|
||||||
|
```
|
||||||
270
backup_and_restore/sauvegarde_docker/sauvegarde_docker.sh
Executable file
270
backup_and_restore/sauvegarde_docker/sauvegarde_docker.sh
Executable file
@@ -0,0 +1,270 @@
|
|||||||
|
#!/bin/bash

# Description
# This script performs a backup of all folders in /srv.
# For folders corresponding to active Docker containers, containers are
# stopped during backup then restarted. Containers listed in
# EXCLUDED_CONTAINERS are ignored. Backups are kept for 7 days.

# Configuration
SRV_DIR="/srv"
BACKUP_DEST="/root/backup/docker"
RETENTION_DAYS=7
LOG_FILE="/root/backup/docker/docker-backup.log"

# List of containers to exclude from backup (space separated)
# Example: EXCLUDED_CONTAINERS="traefik mysql-prod redis-cache"
EXCLUDED_CONTAINERS=""

# Logging function: timestamped line to stdout, appended to $LOG_FILE.
log_message() {
    echo "$(date '+%Y-%m-%d %H:%M:%S') - $1" | tee -a "$LOG_FILE"
}

# Create the destination directory BEFORE the first log_message call:
# $LOG_FILE lives inside $BACKUP_DEST, so logging first (as the original
# did) makes tee fail while the directory does not exist yet. mkdir
# failure is now fatal instead of silent.
if [ ! -d "$BACKUP_DEST" ]; then
    if ! mkdir -p "$BACKUP_DEST"; then
        echo "ERROR: unable to create destination directory $BACKUP_DEST" >&2
        exit 1
    fi
    log_message "Creating destination directory $BACKUP_DEST"
fi

# Check available disk space (at least 1GB = 1048576 KiB). -P forces the
# POSIX single-line output format so long device names cannot wrap and
# break the awk column extraction.
available_space=$(df -P "$BACKUP_DEST" | awk 'NR==2 {print $4}')
if [ "$available_space" -lt 1048576 ]; then
    log_message "WARNING: Low disk space (less than 1GB available)"
fi

log_message "Starting Docker backup"

# Counters for statistics
success_count=0
error_count=0
docker_stopped=()
docker_errors=()
# Archive one service directory into $BACKUP_DEST/<name>/ as a timestamped
# .tar.gz, write a SHA-256 checksum next to it, and update the global
# success_count / error_count statistics.
#   $1 - path of the directory to back up
#   $2 - service name (destination sub-folder)
# Returns 0 on success, 1 when the source is missing or tar fails.
backup_directory() {
    local source_dir=$1
    local folder_name=$2

    if [ ! -e "$source_dir" ]; then
        log_message "WARNING: $source_dir does not exist, skipped"
        error_count=$((error_count + 1))
        return 1
    fi

    # Destination folder for this service.
    local dest_folder="$BACKUP_DEST/$folder_name"
    mkdir -p "$dest_folder"

    local filename filepath
    filename="${folder_name}_$(date +%Y-%m-%d_%H-%M-%S).tar.gz"
    filepath="$dest_folder/$filename"

    log_message "Backing up $source_dir to $folder_name/$filename"

    # Archive from the parent directory so the tarball contains only the
    # service folder, not its full absolute path.
    if ! tar -czf "$filepath" -C "$(dirname "$source_dir")" "$(basename "$source_dir")" 2>/dev/null; then
        log_message "ERROR: Unable to create archive for $source_dir"
        error_count=$((error_count + 1))
        return 1
    fi

    # SHA-256 checksum file consumed later by the verification script.
    sha256sum "$filepath" > "$filepath.sha256"
    file_size=$(du -h "$filepath" | cut -f1)
    log_message "SUCCESS: $filename created ($file_size) - Checksum generated"
    success_count=$((success_count + 1))
    return 0
}
# Succeed (exit 0) when a 'docker' executable can be found on the PATH.
is_docker_available() {
    if command -v docker > /dev/null 2>&1; then
        return 0
    fi
    return 1
}
# Return 0 when $1 appears in the space-separated EXCLUDED_CONTAINERS
# list, 1 otherwise (including when the list is empty).
is_container_excluded() {
    local candidate="$1"
    local excluded
    # An empty list simply yields zero iterations, so no explicit
    # emptiness guard is needed.
    for excluded in $EXCLUDED_CONTAINERS; do
        if [ "$candidate" = "$excluded" ]; then
            return 0
        fi
    done
    return 1
}
# Return 0 when a running Docker container named exactly $1 exists;
# 1 when Docker is unavailable or no such container is running.
is_container_running() {
    local container_name="$1"
    if is_docker_available; then
        # '{{.Names}}' without the "table" prefix avoids the NAMES header
        # row, stderr is silenced on docker (not grep, as the original
        # accidentally did), and grep -Fx matches the whole line literally
        # so names containing regex metacharacters (e.g. dots) cannot
        # false-positive against similar names.
        docker ps --format '{{.Names}}' 2>/dev/null | grep -Fxq "$container_name"
    else
        return 1
    fi
}
# Stop the named Docker container; record it in docker_stopped on success
# or docker_errors on failure, logging either way.
stop_container() {
    local container_name="$1"
    log_message "Stopping Docker container: $container_name"
    if ! docker stop "$container_name" >/dev/null 2>&1; then
        log_message "ERROR: Unable to stop container $container_name"
        docker_errors+=("$container_name")
        return 1
    fi
    log_message "Container $container_name stopped successfully"
    docker_stopped+=("$container_name")
    return 0
}
# Start the named Docker container, logging the outcome; returns docker's
# success or failure.
start_container() {
    local container_name="$1"
    log_message "Starting Docker container: $container_name"
    if ! docker start "$container_name" >/dev/null 2>&1; then
        log_message "ERROR: Unable to start container $container_name"
        return 1
    fi
    log_message "Container $container_name started successfully"
    return 0
}
# Warn early when Docker is unavailable; backups still proceed, folders
# are simply archived without any container management.
if ! is_docker_available; then
    log_message "WARNING: Docker is not installed or accessible"
fi

# Backup directories in /srv
log_message "=== DOCKER SERVICES BACKUP ==="
if [ ! -d "$SRV_DIR" ]; then
    log_message "ERROR: Directory $SRV_DIR does not exist"
    exit 1
fi

# Loop through all folders in /srv.
for srv_folder in "$SRV_DIR"/*; do
    [ -d "$srv_folder" ] || continue
    folder_name=$(basename "$srv_folder")

    # Skip containers on the exclusion list.
    if is_container_excluded "$folder_name"; then
        log_message "EXCLUSION: Container $folder_name skipped (in exclusion list)"
        continue
    fi

    if ! is_container_running "$folder_name"; then
        # No corresponding Docker container, normal backup.
        log_message "Service without active container: $folder_name"
        backup_directory "$srv_folder" "$folder_name"
        continue
    fi

    log_message "Active Docker container detected: $folder_name"
    if stop_container "$folder_name"; then
        # Give the container a moment to shut down completely so the
        # archived state is consistent.
        sleep 5
        backup_directory "$srv_folder" "$folder_name"
        start_container "$folder_name"
    else
        log_message "WARNING: Backing up $folder_name without stopping container (risk of inconsistency)"
        backup_directory "$srv_folder" "$folder_name"
    fi
done
# Verification and generation of checksums for all archives
log_message "=== CHECKSUM VERIFICATION AND GENERATION ==="
archives_without_checksum=0
archives_with_checksum=0

# Read from process substitution instead of piping find into the loop:
# a pipeline runs the loop body in a subshell, so the counter increments
# were silently discarded in the original.
while IFS= read -r archive_file; do
    checksum_file="${archive_file}.sha256"
    if [ ! -f "$checksum_file" ]; then
        log_message "Generating missing checksum for $(basename "$archive_file")"
        sha256sum "$archive_file" > "$checksum_file"
        archives_without_checksum=$((archives_without_checksum + 1))
    else
        archives_with_checksum=$((archives_with_checksum + 1))
    fi
done < <(find "$BACKUP_DEST" -name "*.tar.gz" -type f)

# Final archive count: recount on disk to double-check nothing is missing
# after the (re)generation above.
total_archives=$(find "$BACKUP_DEST" -name "*.tar.gz" -type f | wc -l)
missing_checksums=$(find "$BACKUP_DEST" -name "*.tar.gz" -type f | while read -r archive; do [ ! -f "${archive}.sha256" ] && echo "$archive"; done | wc -l)

log_message "Archives found: $total_archives"
if [ "$missing_checksums" -eq 0 ]; then
    log_message "SUCCESS: All archives have a checksum file"
else
    log_message "WARNING: $missing_checksums archives without checksum"
fi
# Cleanup of old backups: per service, list then delete archives and
# checksum files older than $RETENTION_DAYS days.
log_message "=== OLD BACKUPS CLEANUP ==="
for service_dir in "$BACKUP_DEST"/*/; do
    [ -d "$service_dir" ] || continue
    service_name=$(basename "$service_dir")
    log_message "Removing backups older than $RETENTION_DAYS days for $service_name"

    # Find obsolete files (archives and checksums separately).
    old_archives=$(find "$service_dir" -type f -name "*.tar.gz" -mtime +$RETENTION_DAYS -print 2>/dev/null)
    old_checksums=$(find "$service_dir" -type f -name "*.sha256" -mtime +$RETENTION_DAYS -print 2>/dev/null)

    if [ -z "$old_archives" ] && [ -z "$old_checksums" ]; then
        log_message "No obsolete files to remove for $service_name"
        continue
    fi

    # Remove and log archives.
    if [ -n "$old_archives" ]; then
        while read -r file; do
            log_message "Removing: $service_name/$(basename "$file")"
        done <<< "$old_archives"
        find "$service_dir" -type f -name "*.tar.gz" -mtime +$RETENTION_DAYS -delete 2>/dev/null
    fi

    # Remove and log checksums.
    if [ -n "$old_checksums" ]; then
        while read -r file; do
            log_message "Removing: $service_name/$(basename "$file")"
        done <<< "$old_checksums"
        find "$service_dir" -type f -name "*.sha256" -mtime +$RETENTION_DAYS -delete 2>/dev/null
    fi
done
# Restart containers that failed to restart earlier.
if [ ${#docker_errors[@]} -gt 0 ]; then
    log_message "=== RETRY STARTING CONTAINERS IN ERROR ==="
    for container in "${docker_errors[@]}"; do
        start_container "$container"
    done
fi

# Final statistics
log_message "=== BACKUP SUMMARY ==="
log_message "Successful backups: $success_count"
log_message "Errors: $error_count"
if [ ${#docker_stopped[@]} -gt 0 ]; then
    log_message "Docker containers managed: ${docker_stopped[*]}"
fi
if [ ${#docker_errors[@]} -gt 0 ]; then
    log_message "Docker containers in error: ${docker_errors[*]}"
fi
log_message "Backup completed"

# Exit 1 when anything failed, 0 otherwise.
exit_code=0
if [ "$error_count" -gt 0 ] || [ ${#docker_errors[@]} -gt 0 ]; then
    exit_code=1
fi
exit "$exit_code"
||||||
@@ -0,0 +1,48 @@
|
|||||||
|
#!/bin/bash
# Simple script to verify all Docker backups: for every service folder
# under $BACKUP_DIR, each .tar.gz archive is checked against its .sha256
# checksum file and the result is printed with a status symbol.

BACKUP_DIR="/var/backup/docker"

echo "=== VÉRIFICATION DES SAUVEGARDES DOCKER ==="
echo ""

# Walk every service folder.
for service_dir in "$BACKUP_DIR"/*/; do
    if [ -d "$service_dir" ]; then
        service_name=$(basename "$service_dir")
        echo "Service: $service_name"
        echo "------------------------"

        # Check every backup archive of this service.
        found_backup=false
        for backup_file in "$service_dir"/*.tar.gz; do
            if [ -f "$backup_file" ]; then
                found_backup=true
                backup_name=$(basename "$backup_file")
                checksum_file="${backup_file}.sha256"

                echo -n " $backup_name ... "

                if [ -f "$checksum_file" ]; then
                    # Run sha256sum -c in a subshell: the original did a
                    # bare, unchecked 'cd' that was never undone and leaked
                    # into all later iterations (ShellCheck SC2164).
                    if (cd "$service_dir" && sha256sum -c "$(basename "$checksum_file")" >/dev/null 2>&1); then
                        echo "✓ OK"
                    else
                        echo "✗ CORROMPU"
                    fi
                else
                    echo "⚠ PAS DE CHECKSUM"
                fi
            fi
        done

        if [ "$found_backup" = false ]; then
            echo " Aucune sauvegarde trouvée"
        fi

        echo ""
    fi
done

echo "Vérification terminée."
245
backup_and_restore/scaleway/methode_de_sauvegarde.md
Normal file
245
backup_and_restore/scaleway/methode_de_sauvegarde.md
Normal file
@@ -0,0 +1,245 @@
|
|||||||
|
# Table des matières
|
||||||
|
- [Table des matières](#table-des-matières)
|
||||||
|
- [Méthode de sauvegarde](#méthode-de-sauvegarde)
|
||||||
|
- [Synchronisation des fichiers personnels](#synchronisation-des-fichiers-personnels)
|
||||||
|
- [Transfert des sauvegardes avec rclone](#transfert-des-sauvegardes-avec-rclone)
|
||||||
|
- [Chiffrement des sauvegardes avec rclone](#chiffrement-des-sauvegardes-avec-rclone)
|
||||||
|
- [Tester les sauvegardes](#tester-les-sauvegardes)
|
||||||
|
- [Sécurité des transferts](#sécurité-des-transferts)
|
||||||
|
- [Stockage externe](#stockage-externe)
|
||||||
|
- [Scaleway Cold Storage](#scaleway-cold-storage)
|
||||||
|
- [Kdrive ou Swiss Backup](#kdrive-ou-swiss-backup)
|
||||||
|
- [Exemple de script Bash pour sauvegarder des dossiers](#exemple-de-script-bash-pour-sauvegarder-des-dossiers)
|
||||||
|
- [Exemple de script Bash pour le transfert avec rclone](#exemple-de-script-bash-pour-le-transfert-avec-rclone)
|
||||||
|
- [Fichier de configuration rclone](#fichier-de-configuration-rclone)
|
||||||
|
- [Exemple de commande pour télécharger les sauvegardes chiffrées](#exemple-de-commande-pour-télécharger-les-sauvegardes-chiffrées)
|
||||||
|
- [Passage de la classe de stockage Glacier vers Standard](#passage-de-la-classe-de-stockage-glacier-vers-standard)
|
||||||
|
- [Liste des objets stockés en Glacier](#liste-des-objets-stockés-en-glacier)
|
||||||
|
- [Restauration des objets Glacier](#restauration-des-objets-glacier)
|
||||||
|
- [Liste des fichiers restaurés](#liste-des-fichiers-restaurés)
|
||||||
|
|
||||||
|
## Méthode de sauvegarde
|
||||||
|
|
||||||
|
**Méthode 3-2-1** La méthode de sauvegarde que j'utilise est basée sur le principe 3-2-1, qui est considéré comme une bonne pratique en matière de sauvegarde des données. J'ai 3 copies de mes données, stockées sur 2 types de supports différents, avec 1 copie hors site.
|
||||||
|
|
||||||
|
**Ma méthode de sauvegarde :**
|
||||||
|
|
||||||
|
1. **Sauvegardes quotidiennes :**
|
||||||
|
|
||||||
|
- Les sauvegardes des serveurs, VM/VPS et configurations sont effectuées quotidiennement par l'utilisateur root.
|
||||||
|
- Ces sauvegardes sont conservées pendant 7 jours.
|
||||||
|
- Elles sont transférées sur le NAS et stockées à la fois sur la machine source et le NAS, puis synchronisées sur Scaleway.
|
||||||
|
2. **Sauvegardes mensuelles :**
|
||||||
|
|
||||||
|
- Les sauvegardes des serveurs et VM/VPS sont effectuées le premier jour de chaque mois.
|
||||||
|
- Ces sauvegardes sont conservées pendant 3 mois.
|
||||||
|
- Elles sont directement stockées sur le NAS ou transférées après leur création, puis synchronisées sur Scaleway.
|
||||||
|
3. **Sauvegardes manuelles :**
|
||||||
|
|
||||||
|
- Deux copies des sauvegardes des serveurs, des dossiers personnels (photos/vidéos) et des VM/VPS sont effectuées manuellement tous les trois ou six mois.
|
||||||
|
- Ces sauvegardes sont stockées sur un ou deux disques durs externes, qui sont connectés uniquement pour le transfert.
|
||||||
|
4. **Sauvegarde Proxmox :**
|
||||||
|
|
||||||
|
- Sur Proxmox, j'utilise l'interface graphique pour effectuer des sauvegardes quotidiennes des VMs et des conteneurs. Ces sauvegardes sont conservées pendant 7 jours sur le stockage local de Proxmox.
|
||||||
|
- Les sauvegardes mensuelles sont ensuite transférées directement sur le NAS avec le point de montage NFS.
|
||||||
|
|
||||||
|

|
||||||
|
Vous pouvez trouver plus d'informations sur la [sauvegarde Proxmox dans la documentation officielle](https://pve.proxmox.com/wiki/Backup_and_Restore).
|
||||||
|
|
||||||
|
## Synchronisation des fichiers personnels
|
||||||
|
|
||||||
|
Mes photos, vidéos, clés SSH, sauvegardes de jeux, documents personnels et gestionnaire de mots de passe sont synchronisés sur mon ordinateur portable, mon téléphone et mon PC fixe. Cela signifie que j'ai à tout moment ces fichiers synchronisés avec **Syncthing**. Sur Proxmox, j'ai un serveur Syncthing qui est lui-même sauvegardé selon les méthodes 1, 2 et 3 mentionnées précédemment.
|
||||||
|
|
||||||
|
## Transfert des sauvegardes avec rclone
|
||||||
|
|
||||||
|
Pour le transfert des sauvegardes, j'utilise l'outil en ligne de commande **[rclone](https://rclone.org/overview/)**. Rclone est compatible avec de nombreux services de stockage tels que SFTP, Google Drive, Dropbox, Amazon S3, Scaleway, Proton Drive et bien d'autres. Cela me permet de gérer facilement les différents stockages et de synchroniser les données de manière efficace.
|
||||||
|
|
||||||
|
## Chiffrement des sauvegardes avec rclone
|
||||||
|
|
||||||
|
Rclone offre une fonctionnalité de chiffrement intégrée appelée "crypt", qui me permet de chiffrer les fichiers uniquement sur le stockage distant (Scaleway, dans notre cas), sans avoir à les chiffrer localement. Ainsi, mes données sont sécurisées sur le service de stockage, mais restent lisibles sur mon système local. Lorsque je souhaite accéder à ces sauvegardes, je les déchiffre et les télécharge en utilisant rclone avec ce fichier de configuration.
|
||||||
|
|
||||||
|
## Tester les sauvegardes
|
||||||
|
|
||||||
|
Il est essentiel de tester régulièrement la restauration de vos sauvegardes pour vous assurer qu'elles sont fonctionnelles et que vous pouvez bien récupérer vos données en cas de besoin. Cela vous permettra d'identifier d'éventuels problèmes et de prendre les mesures correctives nécessaires.
|
||||||
|
|
||||||
|
Je recommande de tester la restauration de vos sauvegardes au moins une fois par trimestre. Choisissez des fichiers ou des données représentatifs, restaurez-les et vérifiez qu'ils sont bien récupérés et utilisables. Cette étape est cruciale pour garantir la fiabilité de votre système de sauvegarde.
|
||||||
|
|
||||||
|
## Sécurité des transferts
|
||||||
|
|
||||||
|
Pour sécuriser les transferts de sauvegardes vers le NAS, il est important d'ouvrir les ports nécessaires sur le pare-feu et de mettre en place une liste blanche des adresses IP autorisées à se connecter. De plus, il est recommandé de créer un utilisateur dédié pour chaque machine qui a les droits d'accès spécifiques à son dossier de sauvegarde sur le NAS.
|
||||||
|
|
||||||
|
Cela permet de limiter les risques d'accès non autorisés et de garantir la confidentialité des données sauvegardées. Il est également important de s'assurer que les communications entre les machines et le NAS soient chiffrées (SSH, SFTP, etc.).
|
||||||
|
|
||||||
|
## Stockage externe
|
||||||
|
|
||||||
|
### Scaleway Cold Storage
|
||||||
|
|
||||||
|
Pour le stockage externe à long terme, j'utilise le service de stockage froid (Cold Storage) de [Scaleway](https://www.scaleway.com/en/glacier-cold-storage/). Mes données sont stockées dans un abri souterrain sécurisé à un coût très abordable de 0,002 € par gigaoctet et par mois. Cela me permet de conserver des copies de mes sauvegardes les plus importantes à un faible coût tout en bénéficiant d'un stockage sécurisé et durable.
|
||||||
|
|
||||||
|
### Kdrive ou Swiss Backup
|
||||||
|
|
||||||
|
Il existe également la solution [Kdrive](https://www.infomaniak.com/fr/ksuite/kdrive) ou [Swiss Backup](https://www.infomaniak.com/fr/swiss-backup) que vous pouvez utiliser. Il est possible de monter le disque sur votre ordinateur via le protocole WebDAV ou avec Rclone. Il faut découper les fichiers en blocs de moins de 45 gigaoctets avec Rclone, puis ces blocs seront automatiquement reconstitués lors du téléchargement.
|
||||||
|
|
||||||
|
## Exemple de script Bash pour sauvegarder des dossiers
|
||||||
|
|
||||||
|
Voici un exemple de script Bash qui permet de sauvegarder des dossiers de manière automatique :
|
||||||
|
|
||||||
|
```bash
|
||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# Description
|
||||||
|
# Ce script effectue une sauvegarde quotidienne des dossiers spécifiés.
|
||||||
|
# Les sauvegardes sont conservées pendant 7 jours et transférées sur le NAS.
|
||||||
|
# Le script est exécuté via une tâche cron tous les jours à 3h du matin.
|
||||||
|
|
||||||
|
# Dossiers à sauvegarder
|
||||||
|
BACKUP_DIRS=("/home/user/documents" "/home/user/photos" "/etc/config")
|
||||||
|
|
||||||
|
# Destination des sauvegardes
|
||||||
|
BACKUP_DEST="/mnt/nas/backups"
|
||||||
|
|
||||||
|
# Nombre de jours de conservation
|
||||||
|
RETENTION_DAYS=7
|
||||||
|
|
||||||
|
# Exécution de la sauvegarde
|
||||||
|
for dir in "${BACKUP_DIRS[@]}"; do
|
||||||
|
filename="$(basename "$dir")_$(date +%Y-%m-%d).tar.gz"
|
||||||
|
tar -czf "$BACKUP_DEST/$filename" "$dir"
|
||||||
|
done
|
||||||
|
|
||||||
|
# Suppression des sauvegardes obsolètes
|
||||||
|
find "$BACKUP_DEST" -type f -mtime +$RETENTION_DAYS -delete
|
||||||
|
|
||||||
|
# Synchronisation des sauvegardes sur Scaleway
|
||||||
|
rclone sync "$BACKUP_DEST" remote:backups
|
||||||
|
|
||||||
|
# Fin du script
|
||||||
|
echo "Sauvegarde terminée."
|
||||||
|
```
|
||||||
|
|
||||||
|
**Utilisation avec Crontab :**
|
||||||
|
Pour exécuter ce script automatiquement tous les jours à 3h du matin, ajoutez la ligne suivante à votre fichier Crontab (crontab -e) :
|
||||||
|
|
||||||
|
```
|
||||||
|
0 3 * * * /chemin/vers/votre/script.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
Cela permettra d'effectuer les sauvegardes quotidiennes de manière régulière.
|
||||||
|
|
||||||
|
## Exemple de script Bash pour le transfert avec rclone
|
||||||
|
|
||||||
|
Voici un exemple de script Bash qui permet de transférer les sauvegardes vers le NAS et Scaleway à l'aide de rclone :
|
||||||
|
|
||||||
|
```bash
|
||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# Description
|
||||||
|
# Ce script transfère les sauvegardes quotidiennes et mensuelles vers le NAS et Scaleway.
|
||||||
|
# Il est exécuté via une tâche cron le premier de chaque mois à 4h du matin.
|
||||||
|
|
||||||
|
# Dossiers à transférer
|
||||||
|
BACKUP_DIRS=("/mnt/nas/backups")
|
||||||
|
|
||||||
|
# Destination des transferts
|
||||||
|
NAS_REMOTE="remote:nas/backups"
|
||||||
|
SCALEWAY_REMOTE="remote:scaleway/backups"
|
||||||
|
|
||||||
|
# Exécution du transfert
|
||||||
|
for dir in "${BACKUP_DIRS[@]}"; do
|
||||||
|
rclone sync "$dir" "$NAS_REMOTE"
|
||||||
|
rclone sync "$dir" "$SCALEWAY_REMOTE"
|
||||||
|
done
|
||||||
|
|
||||||
|
# Fin du script
|
||||||
|
echo "Transfert terminé."
|
||||||
|
```
|
||||||
|
|
||||||
|
**Utilisation avec Crontab :**
|
||||||
|
Pour exécuter ce script automatiquement le premier de chaque mois à 4h du matin, ajoutez la ligne suivante à votre fichier Crontab (crontab -e) :
|
||||||
|
|
||||||
|
```
|
||||||
|
0 4 1 * * /chemin/vers/votre/rclone_script.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
Cela permettra de transférer les sauvegardes mensuelles de manière régulière.
|
||||||
|
|
||||||
|
|
||||||
|
## Fichier de configuration rclone
|
||||||
|
|
||||||
|
Rclone offre une fonctionnalité de chiffrement intégrée appelée "crypt", qui me permet de chiffrer les fichiers uniquement sur le stockage distant (Scaleway, dans notre cas), sans avoir à les chiffrer localement. Ainsi, mes données sont sécurisées sur le service de stockage, mais restent lisibles sur mon système local. Lorsque je souhaite accéder à ces sauvegardes, je les déchiffre et les télécharge en utilisant rclone avec ce fichier de configuration.
|
||||||
|
|
||||||
|
Rclone se charge de générer les mots de passe nécessaires au chiffrement et au déchiffrement des données lors de la configuration du remote "crypt". Vous pouvez alors sauvegarder ce fichier de configuration rclone.conf et le transférer sur d'autres machines si nécessaire, afin d'accéder à vos sauvegardes chiffrées depuis différents endroits.
|
||||||
|
|
||||||
|
Le fichier de configuration rclone.conf est généralement placé dans l'un des emplacements suivants :
|
||||||
|
|
||||||
|
- Sur Linux/Unix : `~/.config/rclone/rclone.conf` (répertoire personnel de l'utilisateur)
|
||||||
|
- Sur Windows : `%USERPROFILE%\.config\rclone\rclone.conf` (répertoire personnel de l'utilisateur)
|
||||||
|
- Sur macOS : `~/Library/Application Support/rclone/rclone.conf` (répertoire personnel de l'utilisateur)
|
||||||
|
|
||||||
|
Vous pouvez également le placer à un emplacement de votre choix, mais vous devrez alors spécifier le chemin complet lors de l'utilisation de rclone.
|
||||||
|
|
||||||
|
Les principaux paramètres à configurer sont :
|
||||||
|
|
||||||
|
- `access_key_id` et `secret_access_key` : à remplacer par vos propres identifiants Scaleway.
|
||||||
|
- `region = fr-par` : région de stockage en France.
|
||||||
|
- `storage_class = GLACIER` : utilisation du stockage froid Glacier.
|
||||||
|
- `password` et `password2` : à remplacer par vos propres mots de passe forts pour le chiffrement.
|
||||||
|
|
||||||
|
Lorsque je souhaite accéder à ces sauvegardes, je les déchiffre et les télécharge en utilisant rclone avec ce fichier de configuration.
|
||||||
|
|
||||||
|
## Exemple de commande pour télécharger les sauvegardes chiffrées
|
||||||
|
|
||||||
|
```
|
||||||
|
rclone ls crypt_scaleway:backups
|
||||||
|
rclone cat crypt_scaleway:backups/filename.tar.gz | tar xzf -
|
||||||
|
```
|
||||||
|
|
||||||
|
Explications :
|
||||||
|
|
||||||
|
1. `rclone ls crypt_scaleway:backups` : Cette commande liste le contenu du dossier "backups" sur le remote "crypt_scaleway". Cela vous permet de voir quels fichiers sont présents.
|
||||||
|
|
||||||
|
2. `rclone cat crypt_scaleway:backups/filename.tar.gz | tar xzf -` : Cette commande télécharge le fichier "filename.tar.gz" depuis le dossier "backups" du remote "crypt_scaleway", le déchiffre automatiquement, puis l'extrait dans le répertoire courant.
|
||||||
|
|
||||||
|
Le remote "crypt_scaleway" fait référence à la section `[crypt]` de votre fichier de configuration rclone. Il permet d'accéder aux fichiers chiffrés sur le stockage Scaleway.
|
||||||
|
|
||||||
|
Assurez-vous d'avoir correctement configuré les paramètres de chiffrement (mot de passe, etc.) dans votre fichier de configuration rclone.conf avant d'exécuter ces commandes.
|
||||||
|
|
||||||
|
## Passage de la classe de stockage Glacier vers Standard
|
||||||
|
|
||||||
|
Lorsque vous avez des fichiers stockés dans la classe de stockage Glacier de Scaleway, vous pouvez changer leur classe de stockage individuellement via la console Scaleway. Cependant, si vous avez de nombreux fichiers à restaurer, cette méthode peut s'avérer fastidieuse.
|
||||||
|
|
||||||
|
Les scripts ci-dessous vous permettent de gérer de manière automatisée le passage de la classe Glacier vers la classe Standard pour l'ensemble des fichiers dans un ou plusieurs dossiers. Avant d'utiliser ces scripts, vous devez configurer l'AWS CLI pour accéder à votre compte Scaleway, en suivant la [documentation officielle](https://www.scaleway.com/en/docs/object-storage/api-cli/object-storage-aws-cli/).
|
||||||
|
|
||||||
|
Vous pouvez télécharger les scripts dans le dossier avec ce [lien](https://git.favrep.ch/lapatatedouce/scripts-admin-debian/src/branch/main/backup_and_restore/scaleway).
|
||||||
|
|
||||||
|
### Liste des objets stockés en Glacier
|
||||||
|
|
||||||
|
Le script `list-glacier-objects.sh` vous permet de lister tous les objets stockés dans la classe Glacier d'un bucket Scaleway spécifique, avec la possibilité de filtrer par répertoire.
|
||||||
|
|
||||||
|
```
|
||||||
|
./list-glacier-objects.sh my-bucket my-directory
|
||||||
|
```
|
||||||
|
|
||||||
|
Cela générera un fichier texte contenant la liste des objets à restaurer.
|
||||||
|
|
||||||
|
### Restauration des objets Glacier
|
||||||
|
|
||||||
|
Le script `update_class_standard.sh` lit la liste des objets générée précédemment et initie la restauration de ces objets depuis Glacier vers la classe de stockage Standard. Il vérifie au préalable que l'objet est bien encore en Glacier et qu'aucune restauration n'est en cours.
|
||||||
|
|
||||||
|
```
|
||||||
|
./update_class_standard.sh my-bucket object-list.txt 3
|
||||||
|
```
|
||||||
|
|
||||||
|
Cela restaurera les objets pendant 3 jours dans la classe Standard.
|
||||||
|
|
||||||
|
### Liste des fichiers restaurés
|
||||||
|
|
||||||
|
Enfin, le script `list-file-bucket.sh` vous permet de lister tous les objets d'un bucket Scaleway, y compris ceux qui ont été restaurés depuis Glacier.
|
||||||
|
|
||||||
|
```
|
||||||
|
./list-file-bucket.sh my-bucket my-directory
|
||||||
|
```
|
||||||
|
|
||||||
|
Cela vous permet de vérifier que la restauration s'est bien déroulée.
|
||||||
|
|
||||||
|
Pour réaliser les scripts, je me suis basé sur la [documentation officielle de Scaleway](https://www.scaleway.com/en/docs/object-storage/how-to/restore-an-object-from-glacier/).
|
||||||
BIN
backup_and_restore/scaleway/proxmox_backup.png
Normal file
BIN
backup_and_restore/scaleway/proxmox_backup.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 123 KiB |
88
miscellaneous/ readme_chrooted_SFTP-only.md
Normal file
88
miscellaneous/ readme_chrooted_SFTP-only.md
Normal file
@@ -0,0 +1,88 @@
|
|||||||
|
# Chrooted SFTP-Only Access Configuration
|
||||||
|
|
||||||
|
This guide describes how to set up a chrooted environment with SFTP-only access for users, using SSH keys.
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
- A server running GNU/Linux
|
||||||
|
- Root access to the server.
|
||||||
|
- OpenSSH installed and running.
|
||||||
|
|
||||||
|
## Steps
|
||||||
|
|
||||||
|
### 1. Create a Chroot User
|
||||||
|
|
||||||
|
```bash
|
||||||
|
adduser <username>
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2. Create SFTP Group
|
||||||
|
|
||||||
|
```bash
|
||||||
|
groupadd sftpusers
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3. Add the User to SFTP Group
|
||||||
|
|
||||||
|
```bash
|
||||||
|
usermod -aG sftpusers <username>
|
||||||
|
```
|
||||||
|
|
||||||
|
### 4. Setup Chroot Directory
|
||||||
|
|
||||||
|
Create a directory for SFTP users, ensuring proper ownership and permissions.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
mkdir -p /sftp/<username>
chown root:root /sftp /sftp/<username>
chmod 755 /sftp /sftp/<username>
# The chroot target itself must be owned by root and not writable by the
# user, otherwise sshd rejects the login with "bad ownership or modes for
# chroot directory". Give the user a writable subdirectory inside it:
mkdir /sftp/<username>/data
chown <username>:<username> /sftp/<username>/data
chmod 700 /sftp/<username>/data
|
||||||
|
```
|
||||||
|
|
||||||
|
### 5. Configure SSH for SFTP Access
|
||||||
|
|
||||||
|
Modify `/etc/ssh/sshd_config` to use internal SFTP and set restrictions.
|
||||||
|
|
||||||
|
1. Update the `Subsystem` line:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
Subsystem sftp internal-sftp
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Add a `Match` block at the end:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
Match Group sftpusers
|
||||||
|
ChrootDirectory /sftp/%u
|
||||||
|
ForceCommand internal-sftp
|
||||||
|
AllowTcpForwarding no
|
||||||
|
X11Forwarding no
|
||||||
|
```
|
||||||
|
|
||||||
|
### 6. Setup User's SSH Keys
|
||||||
|
|
||||||
|
Create and configure SSH directories for the user:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
mkdir /home/<username>/.ssh
|
||||||
|
touch /home/<username>/.ssh/authorized_keys
|
||||||
|
chmod 700 /home/<username>/.ssh
|
||||||
|
chmod 600 /home/<username>/.ssh/authorized_keys
|
||||||
|
chown <username>:<username> /home/<username>/.ssh
|
||||||
|
chown <username>:<username> /home/<username>/.ssh/authorized_keys
|
||||||
|
```
|
||||||
|
|
||||||
|
Copy the public SSH key to `/home/<username>/.ssh/authorized_keys`.
|
||||||
|
|
||||||
|
### 7. Restart SSH Service
|
||||||
|
|
||||||
|
```bash
|
||||||
|
systemctl restart sshd
|
||||||
|
```
|
||||||
|
|
||||||
|
## Verification
|
||||||
|
|
||||||
|
- Attempt an SFTP connection to verify restricted access.
|
||||||
|
- Ensure users cannot access the shell.
|
||||||
35
miscellaneous/archive_files.sh
Normal file
35
miscellaneous/archive_files.sh
Normal file
@@ -0,0 +1,35 @@
|
|||||||
|
#!/bin/bash

# Archive each file and directory in the current directory into its own
# 7z archive. Existing *.7z files are skipped, so already-created archives
# are never re-archived. 7z is installed on demand (Debian/Ubuntu).
#
# Usage:
#   1. Save this script to a file, e.g., archive_files.sh.
#   2. Make the script executable: chmod +x archive_files.sh.
#   3. Run it in the directory containing the items to archive: ./archive_files.sh.

# Install 7z when missing, then verify the install actually succeeded
# (the original continued blindly and failed later on every 7z call).
if ! command -v 7z &> /dev/null; then
    echo "7z is not installed. Installing 7z..."
    sudo apt-get update
    sudo apt-get install -y p7zip-full
    if ! command -v 7z &> /dev/null; then
        echo "ERROR: 7z could not be installed." >&2
        exit 1
    fi
fi

# Abort when there is nothing to archive.
if [ -z "$(ls -A .)" ]; then
    echo "No files or directories to archive in the current directory."
    exit 1
fi

# Archive every item except existing 7z archives.
for item in *; do
    if [[ ! "$item" =~ \.7z$ ]]; then
        # Only report success when 7z actually returned success.
        if 7z a "${item}.7z" "$item"; then
            echo "Archive created for $item: ${item}.7z"
        else
            echo "ERROR: failed to archive $item" >&2
        fi
    fi
done

echo "All files and directories have been archived."
|
||||||
40
miscellaneous/clean_file_names.sh
Normal file
40
miscellaneous/clean_file_names.sh
Normal file
@@ -0,0 +1,40 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# this script cleans up the names of files and directories in the current directory.
|
||||||
|
# it ensures that names only contain letters, numbers, or the characters "-", "_", and ".".
|
||||||
|
# spaces and accented characters are replaced with allowed characters.
|
||||||
|
|
||||||
|
# usage:
|
||||||
|
# 1. save this script to a file, e.g., rename_files.sh
|
||||||
|
# 2. make the script executable: chmod +x rename_files.sh
|
||||||
|
# 3. run the script in the directory you want to clean up: ./rename_files.sh
|
||||||
|
|
||||||
|
# function to "clean" the names of files and directories
|
||||||
|
# the function converts uppercase to lowercase, replaces accented characters with their base equivalents,
|
||||||
|
# and replaces any character that is not a letter, digit, dot, underscore, or hyphen with an underscore.
|
||||||
|
# Produce a "cleaned" version of a file/directory name on stdout:
# lowercase, accents stripped, and every character that is not a letter,
# digit, dot, underscore or hyphen replaced with an underscore.
# Arguments: $1 - the name to clean
clean_name() {
    # printf (not echo) so names such as "-n" or names containing
    # backslashes are passed through verbatim. The sed classes also list
    # the uppercase accented letters, which tr does not lowercase in a
    # C locale and which would otherwise degrade to "_".
    printf '%s\n' "$1" | tr '[:upper:]' '[:lower:]' | sed -e 's/[àáâãäåÀÁÂÃÄÅ]/a/g' \
        -e 's/[èéêëÈÉÊË]/e/g' \
        -e 's/[ìíîïÌÍÎÏ]/i/g' \
        -e 's/[òóôõöÒÓÔÕÖ]/o/g' \
        -e 's/[ùúûüÙÚÛÜ]/u/g' \
        -e 's/[çÇ]/c/g' \
        -e 's/[^a-zA-Z0-9._-]/_/g'
}
|
||||||
|
|
||||||
|
# Loop through all files and directories in the current directory and
# rename each one to its cleaned-up form.
for item in *; do
    # Skip when the glob matched nothing (empty directory).
    if [ -e "$item" ]; then
        # Get the new "cleaned" name.
        new_name=$(clean_name "$item")
        # Only act when the name actually changes.
        if [ "$item" != "$new_name" ]; then
            # Never clobber an existing entry: two different names can
            # clean to the same target (e.g. "a b" and "a_b").
            if [ -e "$new_name" ]; then
                echo "skipped: '$item' -> '$new_name' already exists" >&2
            else
                # '--' protects names that start with a dash.
                mv -- "$item" "$new_name"
                echo "renamed: '$item' -> '$new_name'"
            fi
        fi
    fi
done

# display a message indicating that all files and directories have been processed
echo "all files and directories have been processed."
|
||||||
15
miscellaneous/clean_free_space.sh
Normal file
15
miscellaneous/clean_free_space.sh
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
#!/bin/bash
# Reclaim deleted-but-unreleased disk space by temporarily filling the free
# space with zeros and then deleting the filler file (useful before
# compacting VM disk images). It can be scheduled to run via cron.
#
# WARNING: this intentionally fills the filesystem holding /root to 100%
# while it runs; avoid running it on a busy production system.

# The filler file is written under /root; abort if we cannot get there
# (the original kept going and would have zero-filled whatever cwd was).
cd /root || { echo "ERROR: cannot cd to /root" >&2; exit 1; }

# Fill the remaining free space with zeros. pv only adds a progress bar,
# so fall back to plain dd when it is not installed. bs=1M makes the copy
# far faster than dd's 512-byte default.
if command -v pv &> /dev/null; then
    dd if=/dev/zero bs=1M | pv | dd of=grosfichier bs=1M
else
    dd if=/dev/zero of=grosfichier bs=1M
fi

# Make sure the zeros actually reach the disk before deleting the file.
sync

# Wait for 5 seconds before continuing.
sleep 5

# Delete the temporary file to give the space back (-f: no error if the
# file was never created because the disk was already full).
rm -f grosfichier
|
||||||
101
miscellaneous/crc32_checksum_manager.sh
Normal file
101
miscellaneous/crc32_checksum_manager.sh
Normal file
@@ -0,0 +1,101 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# This script manages CRC32 checksums for files and directories.
|
||||||
|
# It can create a checksum file for specified files or directories and verify them later.
|
||||||
|
#
|
||||||
|
# Usage:
|
||||||
|
# To create a checksum file for a file or directory:
|
||||||
|
# ./crc32_checksum_manager.sh --create /path/to/file_or_directory [output_file.txt]
|
||||||
|
# To verify checksums using a checksum file:
|
||||||
|
# ./crc32_checksum_manager.sh --verify /path/to/checksums.txt
|
||||||
|
# Use -h or --help for usage information.
|
||||||
|
|
||||||
|
# Error Messages:
|
||||||
|
# ERROR: INVALID_PATH: $path
|
||||||
|
# Indicates that the specified path is neither a file nor a directory.
|
||||||
|
# ERROR: CHECKSUM_FILE_NOT_FOUND: $checksum_file
|
||||||
|
# Indicates that the specified checksum file does not exist.
|
||||||
|
# ERROR: CHECKSUM_INVALID: $file
|
||||||
|
# Indicates that the calculated checksum for the file does not match the expected checksum.
|
||||||
|
# ERROR: MISSING_ARGUMENT: [specific_message]
|
||||||
|
# Indicates that a required argument (file or directory or checksum file) is missing.
|
||||||
|
# ERROR: INVALID_OPTION: $1
|
||||||
|
# Indicates that an invalid option was provided to the script.
|
||||||
|
|
||||||
|
# Function to display a help message
|
||||||
|
# Print usage information for this script to stdout.
show_help() {
    # A single here-document replaces the original chain of echo calls;
    # the emitted text is identical.
    cat <<EOF
Usage: $0 [OPTION] [FILE_OR_DIRECTORY] [OUTPUT_FILE]
Options:
 -c, --create Create a CRC32 checksum file for the specified file or directory.
 Optionally specify an output file name for the checksums (default: checksums.txt).
 -v, --verify Verify the CRC32 checksums of the specified files or directory.
 -h, --help Display this help message.
EOF
}
|
||||||
|
|
||||||
|
# Function to create a CRC32 checksum file
|
||||||
|
# Create a CRC32 checksum file for a file or an entire directory tree.
# Arguments:
#   $1 - path to the file or directory to checksum
#   $2 - optional output file (default: checksums.txt)
# Output format: one "<crc32> <path>" line per file, as consumed by
# verify_checksum. Exits 1 with ERROR: INVALID_PATH on a bad path.
create_checksum() {
    local path="$1"
    local output_file="${2:-checksums.txt}"

    if [ -d "$path" ]; then
        # Checksum every file under the directory. The file name is passed
        # to the inner shell as a positional argument — never interpolated
        # into the command text as the original did with the awk program —
        # so names containing quotes, spaces or backslashes are safe.
        find "$path" -type f -exec sh -c 'printf "%s %s\n" "$(crc32 "$1")" "$1"' _ {} \; > "$output_file"
    elif [ -f "$path" ]; then
        # Record the file name alongside the checksum; the original wrote
        # only the bare checksum, which verify_checksum could not parse.
        printf '%s %s\n' "$(crc32 "$path")" "$path" > "$output_file"
    else
        echo "ERROR: INVALID_PATH: $path"
        exit 1
    fi
}
|
||||||
|
|
||||||
|
# Function to verify CRC32 checksums
|
||||||
|
# Verify each "<crc32> <path>" line of a checksum file.
# Arguments:
#   $1 - path to the checksum file produced by create_checksum
# Prints ERROR: CHECKSUM_INVALID for every mismatching file.
# Returns non-zero when at least one file failed verification, so the
# script's exit status reflects corruption (the original always returned 0).
verify_checksum() {
    local checksum_file="$1"
    local checksum file
    local failures=0

    if [ ! -f "$checksum_file" ]; then # Check if the checksum file exists
        echo "ERROR: CHECKSUM_FILE_NOT_FOUND: $checksum_file"
        exit 1
    fi

    # "read checksum file" keeps everything after the first space in $file,
    # so paths containing spaces verify correctly (the original
    # awk '{print $2}' truncated such paths at the first space).
    while read -r checksum file; do
        [ -n "$file" ] || continue # skip blank/malformed lines
        if [ "$(crc32 "$file")" != "$checksum" ]; then
            echo "ERROR: CHECKSUM_INVALID: $file"
            failures=$((failures + 1))
        fi
    done < "$checksum_file"

    # Final status: success only when every checksum matched.
    [ "$failures" -eq 0 ]
}
|
||||||
|
|
||||||
|
# Check the script's arguments and execute the appropriate functions
# CLI dispatch: -c/--create, -v/--verify, -h/--help (see show_help).
# Any other value — including no argument at all — prints an error,
# the usage text, and exits 1.
case "$1" in
    -c|--create)
        if [ -z "$2" ]; then # Check if a file or directory was provided
            echo "ERROR: MISSING_ARGUMENT: No file or directory specified."
            show_help
            exit 1
        fi

        # Call the create_checksum function with the provided path and optionally a custom output file
        create_checksum "$2" "$3"
        ;;
    -v|--verify)
        if [ -z "$2" ]; then # Check if a checksum file was provided
            echo "ERROR: MISSING_ARGUMENT: No checksum file specified."
            show_help
            exit 1
        fi

        verify_checksum "$2" # Call the verify_checksum function with the provided checksum file
        ;;
    -h|--help)
        show_help # Display the help message
        ;;
    *)
        echo "ERROR: INVALID_OPTION: $1"
        show_help
        exit 1
        ;;
esac
|
||||||
23
miscellaneous/readme_zfs_8gb_arc_limit.md
Normal file
23
miscellaneous/readme_zfs_8gb_arc_limit.md
Normal file
@@ -0,0 +1,23 @@
|
|||||||
|
To limit the amount of RAM allocated to the ZFS ARC cache to 8 GB on TrueNAS, you can adjust a system configuration setting. Here’s how to do it:
|
||||||
|
|
||||||
|
1. **Access the TrueNAS Interface**: Log in to the TrueNAS web interface.
|
||||||
|
|
||||||
|
2. **Open the Shell**: Go to the top menu and click on **Shell** to open a terminal.
|
||||||
|
|
||||||
|
3. **Modify the ZFS Configuration**:
|
||||||
|
- Enter the following command to limit the ARC to 8 GB:
|
||||||
|
```shell
|
||||||
|
sysctl vfs.zfs.arc.max=8589934592
|
||||||
|
```
|
||||||
|
- This command sets the maximum ARC size to 8 GB (8 GB in bytes = 8589934592).
|
||||||
|
|
||||||
|
4. **Make the Change Persistent**:
|
||||||
|
   - To ensure this setting applies on reboot, make it persistent. On TrueNAS the recommended way is to add a `sysctl` tunable (variable `vfs.zfs.arc.max`, value `8589934592`) under **System > Tunables** in the web interface, because manual edits to system files can be overwritten when the appliance updates.
   - Alternatively, append it to `sysctl.conf` from the shell:
   ```shell
   echo "vfs.zfs.arc.max=8589934592" >> /etc/sysctl.conf
   ```
|
||||||
|
|
||||||
|
5. **Restart the System**: It's recommended to restart TrueNAS to make sure the setting takes effect.
|
||||||
|
|
||||||
|
This will effectively limit the ZFS ARC to 8 GB, allowing more RAM to be available for other processes.
|
||||||
245
miscellaneous/secure_ssh.sh
Normal file
245
miscellaneous/secure_ssh.sh
Normal file
@@ -0,0 +1,245 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# This script configures the SSH server according to Mozilla's best security practices.
|
||||||
|
# It backs up the original SSH configuration file, generates a new secure configuration file,
|
||||||
|
# and then restarts the SSH service to apply the changes.
|
||||||
|
|
||||||
|
# Usage:
|
||||||
|
# 1. Save this script as "secure_ssh.sh".
|
||||||
|
# 2. Make it executable with: `chmod +x secure_ssh.sh`.
|
||||||
|
# 3. Run it with root privileges using: `sudo ./secure_ssh.sh`.
|
||||||
|
|
||||||
|
# Variables
|
||||||
|
# Path to the OpenSSH daemon configuration file that will be rewritten.
SSHD_CONFIG="/etc/ssh/sshd_config"
# Directory where timestamped backups of the original config are stored.
BACKUP_DIR="/etc/ssh/backup_sshd_config"
# Timestamp embedded in each backup filename to keep them unique.
DATE_STR=$(date +"%Y%m%d_%H%M%S")
# Name of the detected SSH service unit; filled in by detect_service.
SSHD_SERVICE=""
|
||||||
|
|
||||||
|
# Function to validate an IP address
|
||||||
|
# Validate a dotted-quad IPv4 address.
# Arguments: $1 - candidate IP address string
# Returns:   0 when $1 is a syntactically valid IPv4 address, 1 otherwise
validate_ip() {
  local ip=$1
  local stat=1
  local -a octets
  local OIFS

  if [[ $ip =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then
    OIFS=$IFS
    IFS='.'
    octets=($ip)
    IFS=$OIFS
    # Force base 10: octets with a leading zero (e.g. "08") would
    # otherwise be parsed as octal and abort the arithmetic test with
    # "value too great for base" instead of being accepted/rejected.
    if (( 10#${octets[0]} <= 255 && 10#${octets[1]} <= 255 \
          && 10#${octets[2]} <= 255 && 10#${octets[3]} <= 255 )); then
      stat=0
    fi
  fi

  return $stat
}
|
||||||
|
|
||||||
|
# Function to validate the SSH port
|
||||||
|
# Check that the supplied value is a usable TCP port number.
# Arguments: $1 - candidate port
# Returns:   0 when $1 is an integer in [1, 65535], 1 otherwise
validate_ssh_port() {
  local candidate=$1

  # Must be all digits, then fall within the valid TCP port range.
  if [[ "$candidate" =~ ^[0-9]+$ ]] && (( candidate >= 1 && candidate <= 65535 )); then
    return 0
  fi
  return 1
}
|
||||||
|
|
||||||
|
# Detect the name of the SSH service
|
||||||
|
# Print the name of the installed SSH service unit ("sshd" or "ssh"),
# or an empty string when neither appears in systemd's unit list.
# Outputs: unit name (possibly empty) on stdout
detect_sshd_service() {
  local units
  units=$(systemctl list-units --type=service)

  # "sshd" is checked first; a plain "ssh" pattern would also match it.
  case "$units" in
    *sshd*) echo "sshd" ;;
    *ssh*)  echo "ssh" ;;
    *)      echo "" ;;
  esac
}
|
||||||
|
|
||||||
|
# Function to restart the SSH service
|
||||||
|
# Restart the given systemd service and propagate systemctl's exit status.
# Arguments: $1 - service unit name (e.g. "sshd")
restart_sshd() {
  local unit=$1
  # The function's return value is systemctl's own exit status.
  systemctl restart "$unit"
}
|
||||||
|
|
||||||
|
# Create a backup of the SSH configuration file
|
||||||
|
# Copy the current sshd_config into BACKUP_DIR under a timestamped name.
# Globals: SSHD_CONFIG (read), BACKUP_DIR (read), DATE_STR (read),
#          BACKUP_FILE (written - used later to restore on failure)
# Exits the script when the backup copy cannot be created.
backup_sshd_config() {
  mkdir -p "$BACKUP_DIR"
  BACKUP_FILE="${BACKUP_DIR}/sshd_config.${DATE_STR}.bak"
  if ! cp "$SSHD_CONFIG" "$BACKUP_FILE"; then
    echo "Error creating backup of the SSH configuration file."
    exit 1
  fi
  echo "SSH configuration backup created: $BACKUP_FILE"
}
|
||||||
|
|
||||||
|
# Prompt for the SSH port
|
||||||
|
# Ask the operator for the SSH port to use, defaulting to 22 on empty
# input, and loop until validate_ssh_port accepts the answer.
# Globals: SSH_PORT (written)
prompt_ssh_port() {
  local answer
  while true; do
    read -p "Please enter the SSH port (default 22): " answer
    SSH_PORT=${answer:-22}
    if validate_ssh_port "$SSH_PORT"; then
      echo "SSH port used: $SSH_PORT"
      break
    fi
    echo "Invalid port. The port must be an integer between 1 and 65535."
  done
}
|
||||||
|
|
||||||
|
# Prompt to restrict SSH access to specific IP addresses
|
||||||
|
# Optionally collect a comma-separated allow-list of client IP addresses.
# Accepts "yes"/"oui" (case-insensitive first letter) to enable the
# restriction; anything else means no restriction.
# Globals: ALLOWED_IPS (written; empty string means "no restriction"),
#          RESTRICT_ACCESS (written)
prompt_allowed_ips() {
  local raw entry ok
  local -a candidates collected

  read -p "Do you want to restrict SSH access to specific IP addresses? (yes/no): " RESTRICT_ACCESS
  ALLOWED_IPS=""

  if [[ ! "$RESTRICT_ACCESS" =~ ^([oO]ui|[yY]es)$ ]]; then
    echo "SSH access allowed from all IP addresses."
    return
  fi

  while true; do
    read -p "Enter a comma-separated list of allowed IP addresses (e.g. 192.168.1.1,10.0.0.1): " raw
    IFS=',' read -ra candidates <<< "$raw"
    ok=true
    collected=()
    for entry in "${candidates[@]}"; do
      entry=$(echo "$entry" | xargs)   # strip surrounding whitespace
      if ! validate_ip "$entry"; then
        echo "Invalid IP address: $entry"
        ok=false
        break
      fi
      collected+=("$entry")
    done
    if $ok; then
      ALLOWED_IPS="${collected[*]}"    # space-joined list
      echo "SSH access allowed from: $ALLOWED_IPS"
      break
    fi
    echo "Please enter valid IP addresses."
  done
}
|
||||||
|
|
||||||
|
# Generate a new SSH configuration file
|
||||||
|
# Overwrite $SSHD_CONFIG with a hardened configuration based on Mozilla's
# OpenSSH guidelines, then append Match blocks restricting access to the
# collected allow-list when one was supplied.
# Globals: SSHD_CONFIG (written), SSH_PORT (read), ALLOWED_IPS (read)
generate_new_sshd_config() {
  # The unquoted EOL delimiter lets $SSH_PORT expand inside the heredoc.
  cat <<EOL > "$SSHD_CONFIG"
# Secure SSH configuration according to Mozilla's recommendations

# Protocol version
Protocol 2

# Secure ciphers
Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr

# Secure key exchange algorithms
KexAlgorithms curve25519-sha256,curve25519-sha256@libssh.org,diffie-hellman-group-exchange-sha256

# Secure MAC algorithms
MACs hmac-sha2-256-etm@openssh.com,hmac-sha2-512-etm@openssh.com,hmac-sha2-256,hmac-sha2-512

# Disable password authentication
PasswordAuthentication no

# Secure host keys (make sure they exist)
HostKey /etc/ssh/ssh_host_ed25519_key
HostKey /etc/ssh/ssh_host_rsa_key

# Restrict root access (root will not be able to connect)
PermitRootLogin no

# Strict login policy
MaxAuthTries 3
LoginGraceTime 30

# Specified SSH port
Port $SSH_PORT

# Other recommended configurations
PermitEmptyPasswords no
ChallengeResponseAuthentication no
UsePAM yes
AllowTcpForwarding no
X11Forwarding no

# IP address access restrictions (if specified)
EOL

  # When an allow-list was provided, append a Match block admitting only
  # those addresses and a negated Match block denying everyone else.
  # NOTE(review): the "*,!addr" negation syntax in Match Address should
  # be verified against sshd_config(5) / `sshd -t` before relying on it.
  if [[ -n "$ALLOWED_IPS" ]]; then
  echo "" >> "$SSHD_CONFIG"
  echo "# IP address access restriction" >> "$SSHD_CONFIG"
  echo "Match Address $(echo "$ALLOWED_IPS" | tr ' ' ',')" >> "$SSHD_CONFIG"
  echo " AllowUsers *" >> "$SSHD_CONFIG"
  echo "Match Address *,!$(echo "$ALLOWED_IPS" | tr ' ' ',')" >> "$SSHD_CONFIG"
  echo " DenyUsers *" >> "$SSHD_CONFIG"
  fi
}
|
||||||
|
|
||||||
|
# Test the syntax of the new SSH configuration
|
||||||
|
# Validate the syntax of the active sshd configuration file.
# Returns: sshd's own exit status (0 when the config parses cleanly).
test_sshd_config() {
  sshd -t
}
|
||||||
|
|
||||||
|
# Detect the SSH service in use
|
||||||
|
# Populate SSHD_SERVICE with the detected SSH unit name, aborting the
# whole script when no SSH service can be found.
# Globals: SSHD_SERVICE (written)
detect_service() {
  SSHD_SERVICE=$(detect_sshd_service)
  if [[ -n "$SSHD_SERVICE" ]]; then
    return 0
  fi
  echo "Error: Unable to detect the SSH service (sshd or ssh)."
  exit 1
}
|
||||||
|
|
||||||
|
# Main process
|
||||||
|
# Drive the full hardening workflow: confirm operator intent, detect the
# SSH service, back up and regenerate sshd_config, validate the result,
# and restart the service - restoring the backup on any failure so the
# operator is not locked out.
# Globals: SSHD_CONFIG, SSHD_SERVICE, BACKUP_FILE (read; set by helpers)
main() {
  # Warning message before proceeding with the script.
  echo "Warning: Before running this script, make sure you have created a user and an SSH key in the authorized_keys file."
  echo "The root user will not be able to connect."

  read -p "Do you want to continue? (yes/no): " CONTINUE

  # Accepts English "yes" or French "oui"; anything else aborts cleanly.
  if [[ ! "$CONTINUE" =~ ^([yY]es|[oO]ui)$ ]]; then
    echo "Exiting script."
    exit 0
  fi

  # Detect the SSH service
  detect_service

  # Backup current configuration (sets BACKUP_FILE for the rollbacks below)
  backup_sshd_config

  # Prompt for the SSH port
  prompt_ssh_port

  # Prompt for allowed IP addresses
  prompt_allowed_ips

  # Generate the new configuration file
  generate_new_sshd_config
  echo "New SSH configuration file generated."

  # Test the syntax of the new configuration; on failure restore the
  # backup and restart so the running daemon keeps a valid config.
  if test_sshd_config; then
    echo "The new SSH configuration is valid."
  else
    echo "Error: The new SSH configuration contains errors. Restoring original configuration."
    cp "$BACKUP_FILE" "$SSHD_CONFIG"
    restart_sshd "$SSHD_SERVICE"
    exit 1
  fi

  # Restart the SSH service; roll back the config if the restart fails.
  if restart_sshd "$SSHD_SERVICE"; then
    echo "The SSH service has been restarted successfully."
    echo "The SSH configuration has been updated according to Mozilla's security recommendations."
  else
    echo "Error: Failed to restart the SSH service. Restoring original configuration."
    cp "$BACKUP_FILE" "$SSHD_CONFIG"
    restart_sshd "$SSHD_SERVICE"
    exit 1
  fi
}
|
||||||
|
|
||||||
|
# Execute the main process
|
||||||
|
main
|
||||||
33
miscellaneous/set_cpu_mode.sh
Normal file
33
miscellaneous/set_cpu_mode.sh
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# Description:
|
||||||
|
# This script allows the user to select a CPU frequency scaling governor mode.
|
||||||
|
# The governor controls the trade-off between performance and power consumption.
|
||||||
|
# Available modes typically include "performance", "powersave", "ondemand", "conservative", and "schedutil".
|
||||||
|
|
||||||
|
# Usage:
|
||||||
|
# 1. Save this script to a file, e.g., set_cpu_mode.sh.
|
||||||
|
# 2. Make the script executable with the command: chmod +x set_cpu_mode.sh.
|
||||||
|
# 3. Run the script with: sudo ./set_cpu_mode.sh.
|
||||||
|
# 4. Follow the on-screen prompts to select the desired CPU governor mode.
|
||||||
|
|
||||||
|
# Ask the user to choose a mode
|
||||||
|
# Present the list of CPU frequency governors and read the selection.
echo "Please choose a CPU frequency scaling governor mode:"
echo "1. performance"
echo "2. powersave"
echo "3. ondemand"
echo "4. conservative"
echo "5. schedutil"
read -p "Enter the number corresponding to your choice: " choice

# Array of available modes (index 0..4 maps to menu entries 1..5).
modes=("performance" "powersave" "ondemand" "conservative" "schedutil")

# Check the answer is a plain integer BEFORE using it arithmetically:
# non-numeric input such as "2x" would otherwise make the -ge/-le test
# raise an evaluation error instead of being rejected as invalid.
if [[ "$choice" =~ ^[0-9]+$ ]] && (( choice >= 1 && choice <= ${#modes[@]} )); then
  selected_mode=${modes[choice - 1]}
  # Write the governor to every CPU's cpufreq policy; tee needs root.
  echo "$selected_mode" | sudo tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor
  echo "The $selected_mode mode has been applied."
else
  echo "Invalid choice. No mode was applied."
fi
|
||||||
57
miscellaneous/sftp_chroot.sh
Normal file
57
miscellaneous/sftp_chroot.sh
Normal file
@@ -0,0 +1,57 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# This script automatically configures a chroot environment for an SFTP user in the /sftp directory.
|
||||||
|
# It creates a user with restricted SFTP access, sets up the necessary directory structure,
|
||||||
|
# configures permissions, and adds an authorized_keys file for key-based authentication.
|
||||||
|
|
||||||
|
# Usage:
|
||||||
|
# Save this script as "sftp_chroot.sh" and make it executable by running the command: `chmod +x sftp_chroot.sh`.
|
||||||
|
# Then, execute it with root privileges using: `sudo ./sftp_chroot.sh`.
|
||||||
|
# The script will prompt you for the SFTP username, set up the necessary chroot environment, configure permissions,
|
||||||
|
# and apply the SSH settings to restrict the user to SFTP access only. Finally, it will restart the SSH service to
|
||||||
|
# apply the changes.
|
||||||
|
|
||||||
|
# Check if the script is executed with root privileges
|
||||||
|
# Abort unless running as root: every step below modifies system state.
if [[ $EUID -ne 0 ]]; then
  echo "This script must be run as root."
  exit 1
fi

# Prompt for the SFTP username
read -p "Enter the SFTP username: " USERNAME

# Refuse empty input: with an empty name the paths below would collapse
# to /sftp itself and the chown/chmod calls would hit the wrong tree.
if [[ -z "$USERNAME" ]]; then
  echo "Error: username must not be empty."
  exit 1
fi

# Create the user with /bin/false shell to limit access.
# "$USERNAME" is quoted everywhere to avoid word-splitting surprises,
# and useradd failures (e.g. user already exists) now stop the script.
if ! useradd -m -d "/sftp/$USERNAME" -s /bin/false "$USERNAME"; then
  echo "Error: failed to create user $USERNAME."
  exit 1
fi

# Create the chroot environment in /sftp
mkdir -p "/sftp/$USERNAME"
mkdir -p "/sftp/$USERNAME/upload"
mkdir -p "/sftp/$USERNAME/.ssh"

# Set permissions for the chroot directory: sshd requires the chroot
# root to be root-owned and not group/world-writable, so the user only
# gets write access inside upload/.
chown root:root "/sftp/$USERNAME"
chmod 755 "/sftp/$USERNAME"
chown "$USERNAME:$USERNAME" "/sftp/$USERNAME/upload"

# Create the authorized_keys file with the permissions sshd expects.
touch "/sftp/$USERNAME/.ssh/authorized_keys"
chmod 700 "/sftp/$USERNAME/.ssh"
chmod 600 "/sftp/$USERNAME/.ssh/authorized_keys"
chown -R "$USERNAME:$USERNAME" "/sftp/$USERNAME/.ssh"

echo "User $USERNAME has been successfully configured in a chroot environment."

# Append the per-user SFTP block to sshd_config only once (idempotent).
if ! grep -q "Match User $USERNAME" /etc/ssh/sshd_config; then
  echo -e "\n# SFTP configuration for $USERNAME" >> /etc/ssh/sshd_config
  echo "Match User $USERNAME" >> /etc/ssh/sshd_config
  echo " ChrootDirectory /sftp/$USERNAME" >> /etc/ssh/sshd_config
  echo " ForceCommand internal-sftp" >> /etc/ssh/sshd_config
  echo " AllowTcpForwarding no" >> /etc/ssh/sshd_config
  echo " PermitTunnel no" >> /etc/ssh/sshd_config
fi

# Restart the SSH service so the new Match block takes effect.
systemctl restart ssh

echo "Chroot jail for $USERNAME configured successfully. You can now add SSH keys in /sftp/$USERNAME/.ssh/authorized_keys"
|
||||||
@@ -0,0 +1,187 @@
|
|||||||
|
# Configuration d'un tunnel Wireguard d'un serveur local à un VPS
|
||||||
|
|
||||||
|
Cette configuration permet d'établir une connexion entre votre réseau local et un serveur VPS, vous offrant la possibilité d'ouvrir les ports nécessaires sur le VPS sans exposer directement votre réseau. Vous bénéficiez également de la protection DDoS fournie par le serveur distant. Cette solution est pratique si vous ne pouvez pas ouvrir de ports sur votre routeur ou si vous disposez d'une adresse IP dynamique. L'utilisation d'un VPS comme intermédiaire vous permet de contourner les limitations de votre routeur ou de votre fournisseur d'accès Internet, tout en offrant une sécurité supplémentaire.
|
||||||
|
|
||||||
|
## Prérequis
|
||||||
|
|
||||||
|
- Un minimum de connaissances en réseau, administration VPS.
|
||||||
|
- Vous disposez d'un VPS sur Debian chez IONOS, OVH, autres.
|
||||||
|
- Connaître l'adresse IP publique du VPS.
|
||||||
|
- Une heure de temps pour faire la configuration.
|
||||||
|
|
||||||
|
## Installation du serveur WireGuard sur le VPS
|
||||||
|
|
||||||
|
Connectez-vous en SSH à votre machine :
|
||||||
|
|
||||||
|
1. Connectez-vous en tant que root :
|
||||||
|
```bash
|
||||||
|
sudo -i
|
||||||
|
```
|
||||||
|
2. Suivez les instructions de ce [dépôt](https://github.com/angristan/wireguard-install) pour installer votre serveur WireGuard. Laissez tout par défaut et appelez votre client le nom de votre machine locale pour vous en souvenir si vous créez plusieurs clients.
|
||||||
|
3. À la fin de l'installation, vous aurez accès au fichier de configuration. Exécutez la commande `cat /chemindevotrefichier.conf` pour copier les informations du client. Notez ces informations quelque part afin de pouvoir les utiliser plus tard dans le processus d'installation.
|
||||||
|
|
||||||
|
4. Vous pouvez créer plusieurs clients, mais les machines ne communiqueront pas ensemble avec `iptables -I FORWARD -i wg0 -s 10.66.66.0/24 -d 10.66.66.0/24 -j DROP`, ce qui est une bonne pratique de sécurité.
|
||||||
|
5. Vous devez lire le fichier de configuration pour comprendre son fonctionnement :
|
||||||
|
|
||||||
|
```ini
|
||||||
|
# Description :
|
||||||
|
# Cette configuration vous permet d'établir une connexion entre une machine dans votre réseau local ou un routeur OPNsense et un serveur VPS. Cela vous permet d'ouvrir les ports nécessaires sur le VPS sans exposer votre réseau local, tout en bénéficiant de la protection DDoS fournie par le serveur distant.
|
||||||
|
|
||||||
|
# Prérequis :
|
||||||
|
# - Avoir un serveur VPS chez OVH ou Ionos avec Debian
|
||||||
|
# - Installer https://github.com/angristan/wireguard-install et générer un client
|
||||||
|
# - Connaître l'interface réseau en utilisant la commande 'ip a' et remplacer 'ens6' par le nom correct de l'interface
|
||||||
|
# - Connaître l'adresse IP publique du VPS
|
||||||
|
|
||||||
|
# Sauvegarder la configuration actuelle avant de la modifier
|
||||||
|
# Effectuez cette sauvegarde avant toute modification
|
||||||
|
# cp /etc/wireguard/wg0.conf /etc/wireguard/wg0.conf.bak
|
||||||
|
|
||||||
|
# Arrêter le service WireGuard avant de modifier la configuration
|
||||||
|
# sudo wg-quick down wg0
|
||||||
|
|
||||||
|
# Modifier la configuration de WireGuard
|
||||||
|
# Ouvrez le fichier de configuration pour le modifier
|
||||||
|
# nano /etc/wireguard/wg0.conf
|
||||||
|
# Supprimez la section entre 'PrivateKey = x' et '### Client opnsense' et ajoutez cette configuration avec tous les commentaires pour vous aider
|
||||||
|
# Remplacez 'PORT_WIREGUARD' et 'IP_PUBLIQUE_DU_SERVEUR-VPS' par le port WireGuard et l'adresse IP publique du VPS
|
||||||
|
# Il est important d'ouvrir en `PostUp` et en `PostDown` le port que vous avez ouvert.
|
||||||
|
# Pour un deuxième client, ajoutez les lignes et modifiez l'adresse IP locale : '10.66.66.3' en prenant exemple sur '10.66.66.2'
|
||||||
|
|
||||||
|
# Règles iptables à appliquer après avoir configuré l'interface WireGuard
|
||||||
|
PostUp = sysctl -w net.ipv4.ip_forward=1 # Active le forwarding IPv4 pour permettre le routage entre les interfaces
|
||||||
|
PostUp = iptables -I INPUT -p udp --dport PORT_WIREGUARD -j ACCEPT # Autorise le trafic entrant sur le port UDP de WireGuard (remplacez 'PORT_WIREGUARD' par le numéro de port réel)
|
||||||
|
PostUp = iptables -A FORWARD -i wg0 -o ens6 -j ACCEPT # Autorise le trafic passant de l'interface WireGuard (wg0) vers l'interface réseau (ens6)
|
||||||
|
PostUp = iptables -A FORWARD -i ens6 -o wg0 -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT # Autorise les connexions établies et connexes à revenir sur l'interface WireGuard
|
||||||
|
PostUp = iptables -I FORWARD -i wg0 -s 10.66.66.0/24 -d 10.66.66.0/24 -j DROP # Bloque le trafic interne au sous-réseau WireGuard pour éviter les boucles ou les attaques internes
|
||||||
|
|
||||||
|
# Règles de translation d'adresse réseau (NAT) pour le trafic sortant et entrant
|
||||||
|
PostUp = iptables -t nat -A POSTROUTING -s 10.66.66.2/32 -o ens6 -j SNAT --to-source IP_PUBLIQUE_DU_SERVEUR-VPS # Effectue une translation d'adresse pour le trafic sortant du client vers l'IP publique (remplacez 'IP_PUBLIQUE_DU_SERVEUR' par l'adresse réelle)
|
||||||
|
PostUp = iptables -t nat -A PREROUTING -i ens6 -d IP_PUBLIQUE_DU_SERVEUR-VPS -p tcp --dport 80 -j DNAT --to-destination 10.66.66.2:80 # Redirige le trafic HTTP entrant vers l'adresse IP du client WireGuard
|
||||||
|
PostUp = iptables -t nat -A PREROUTING -i ens6 -d IP_PUBLIQUE_DU_SERVEUR-VPS -p tcp --dport 443 -j DNAT --to-destination 10.66.66.2:443 # Redirige le trafic HTTPS entrant vers l'adresse IP du client WireGuard
|
||||||
|
|
||||||
|
# Règles iptables à retirer lors de la suppression de l'interface WireGuard
|
||||||
|
PostDown = iptables -D INPUT -p udp --dport PORT_WIREGUARD -j ACCEPT || true # Supprime la règle autorisant le trafic entrant sur le port UDP de WireGuard (remplacez 'PORT_WIREGUARD' par le numéro de port réel)
|
||||||
|
PostDown = iptables -D FORWARD -i wg0 -o ens6 -j ACCEPT || true # Supprime la règle autorisant le trafic de wg0 vers ens6
|
||||||
|
PostDown = iptables -D FORWARD -i ens6 -o wg0 -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT || true # Supprime la règle autorisant les connexions établies à retourner vers wg0
|
||||||
|
PostDown = iptables -D FORWARD -i wg0 -s 10.66.66.0/24 -d 10.66.66.0/24 -j DROP || true # Supprime la règle bloquant le trafic interne au sous-réseau WireGuard
|
||||||
|
PostDown = iptables -t nat -D POSTROUTING -s 10.66.66.2/32 -o ens6 -j SNAT --to-source IP_PUBLIQUE_DU_SERVEUR-VPS || true # Supprime la règle de NAT pour le trafic sortant du client
|
||||||
|
PostDown = iptables -t nat -D PREROUTING -i ens6 -d IP_PUBLIQUE_DU_SERVEUR-VPS -p tcp --dport 80 -j DNAT --to-destination 10.66.66.2:80 || true # Supprime la redirection du trafic HTTP entrant
|
||||||
|
PostDown = iptables -t nat -D PREROUTING -i ens6 -d IP_PUBLIQUE_DU_SERVEUR-VPS -p tcp --dport 443 -j DNAT --to-destination 10.66.66.2:443 || true # Supprime la redirection du trafic HTTPS entrant
|
||||||
|
|
||||||
|
# Redémarrer le service WireGuard après avoir modifié la configuration
|
||||||
|
# sudo wg-quick up wg0
|
||||||
|
|
||||||
|
# Configuration OPNsense :
|
||||||
|
# Par exemple, rediriger le port de 192.168.1.x vers [local_ip_wireguard_client_10.66.66.X]
|
||||||
|
```
|
||||||
|
6. Éditez le fichier de configuration :
|
||||||
|
```bash
|
||||||
|
nano /etc/wireguard/wg0.conf
|
||||||
|
```
|
||||||
|
## Installation du client Wireguard sur le serveur dans votre réseau local
|
||||||
|
|
||||||
|
Ce guide fournit les instructions étape par étape pour installer le client généré précédemment sur un système Debian.
|
||||||
|
|
||||||
|
|
||||||
|
### 1. Mise à jour du Système
|
||||||
|
|
||||||
|
Mettez à jour les paquets du système :
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo apt update && sudo apt upgrade -y
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2. Installer WireGuard
|
||||||
|
|
||||||
|
Installez WireGuard à partir des dépôts de Debian :
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo apt install wireguard curl resolvconf iptables -y
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3. Générer des Clés
|
||||||
|
|
||||||
|
Générez une clé privée et une clé publique pour l'interface WireGuard :
|
||||||
|
|
||||||
|
```bash
|
||||||
|
wg genkey | tee privatekey | wg pubkey > publickey
|
||||||
|
```
|
||||||
|
|
||||||
|
### 4. Configurer WireGuard
|
||||||
|
|
||||||
|
Créez un fichier de configuration pour l'interface WireGuard :
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo nano /etc/wireguard/wg0.conf
|
||||||
|
```
|
||||||
|
|
||||||
|
Collez ou modifiez la configuration suivante dans le fichier, en remplaçant les espaces réservés par les valeurs appropriées :
|
||||||
|
|
||||||
|
```ini
|
||||||
|
[Interface]
|
||||||
|
PrivateKey = VOTRE_CLE_PRIVEE
|
||||||
|
Address = 10.0.0.2/24 # IP locale pour l'interface WireGuard
|
||||||
|
ListenPort = 51820 # Le port WireGuard
|
||||||
|
|
||||||
|
# Autoriser le trafic entrant UDP sur le port 22 depuis le réseau local
|
||||||
|
PostUp = iptables -A INPUT -s 192.168.0.0/24 -p udp --dport 22 -j ACCEPT
|
||||||
|
|
||||||
|
# Bloquer tout autre trafic entrant depuis le réseau local
|
||||||
|
PostUp = iptables -A INPUT -s 192.168.0.0/24 -j DROP
|
||||||
|
|
||||||
|
# Supprimer la règle autorisant le trafic entrant TCP sur le port 22 depuis le réseau local
|
||||||
|
PostDown = iptables -D INPUT -s 192.168.0.0/24 -p tcp --dport 22 -j ACCEPT
|
||||||
|
|
||||||
|
# Supprimer la règle autorisant le trafic entrant UDP sur le port 22 depuis le réseau local
|
||||||
|
PostDown = iptables -D INPUT -s 192.168.0.0/24 -p udp --dport 22 -j ACCEPT
|
||||||
|
|
||||||
|
# Supprimer la règle bloquant tout autre trafic entrant depuis le réseau local
|
||||||
|
PostDown = iptables -D INPUT -s 192.168.0.0/24 -j DROP
|
||||||
|
|
||||||
|
[Peer]
|
||||||
|
PublicKey = CLE_PUBLIQUE_DU_SERVEUR
|
||||||
|
Endpoint = IP_PUBLIQUE_DU_SERVEUR:51820 # Le port WireGuard
|
||||||
|
AllowedIPs = 0.0.0.0/0
|
||||||
|
```
|
||||||
|
|
||||||
|
### 5. Démarrer WireGuard
|
||||||
|
|
||||||
|
Mettez en route l'interface WireGuard :
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo wg-quick up wg0
|
||||||
|
```
|
||||||
|
|
||||||
|
Activez WireGuard au démarrage :
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo systemctl enable wg-quick@wg0
|
||||||
|
```
|
||||||
|
|
||||||
|
### 6. Vérifier la Connexion
|
||||||
|
|
||||||
|
Vérifiez l'état de WireGuard :
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo wg
|
||||||
|
```
|
||||||
|
|
||||||
|
### 7. Tester l'IP Publique
|
||||||
|
|
||||||
|
Vérifiez votre adresse IP publique en utilisant `curl` :
|
||||||
|
|
||||||
|
```bash
|
||||||
|
curl ifconfig.me
|
||||||
|
```
|
||||||
|
|
||||||
|
### 8. Arrêter WireGuard
|
||||||
|
|
||||||
|
Fermez l'interface :
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo wg-quick down wg0
|
||||||
|
```
|
||||||
|
### 9. Votre serveur est normalement accessible
|
||||||
|
|
||||||
|
Votre serveur est normalement accessible depuis l'extérieur et bloque le trafic du réseau local.
|
||||||
157
networking/WireGuard_Setup_Guide_Debian.md
Normal file
157
networking/WireGuard_Setup_Guide_Debian.md
Normal file
@@ -0,0 +1,157 @@
|
|||||||
|
|
||||||
|
# WireGuard VPN Setup on Debian
|
||||||
|
|
||||||
|
This guide provides step-by-step instructions for installing and configuring WireGuard on a Debian system. Separate instructions are given for both client and server setups.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Requirements
|
||||||
|
|
||||||
|
- Debian-based system (Debian 12+) with root or sudo privileges.
|
||||||
|
- Public and private key pair for WireGuard.
|
||||||
|
- A server to connect to (for client setup).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Client-Side Setup
|
||||||
|
|
||||||
|
### 1. System Update
|
||||||
|
|
||||||
|
Update the system packages:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo apt update && sudo apt upgrade -y
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2. Install WireGuard
|
||||||
|
|
||||||
|
Install WireGuard from Debian's repositories:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo apt install wireguard -y
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3. Generate Keys
|
||||||
|
|
||||||
|
Generate a private and public key for the WireGuard interface:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
wg genkey | tee privatekey | wg pubkey > publickey
|
||||||
|
```
|
||||||
|
|
||||||
|
The following files are generated:
|
||||||
|
- `privatekey`: Your WireGuard private key.
|
||||||
|
- `publickey`: Your WireGuard public key.
|
||||||
|
|
||||||
|
### 4. Configure WireGuard
|
||||||
|
|
||||||
|
Create a configuration file for the WireGuard interface:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo nano /etc/wireguard/wg0.conf
|
||||||
|
```
|
||||||
|
|
||||||
|
Paste or modify the following configuration in the file, replacing placeholders with appropriate values:
|
||||||
|
|
||||||
|
```ini
|
||||||
|
[Interface]
|
||||||
|
PrivateKey = YOUR_PRIVATE_KEY
|
||||||
|
Address = 10.0.0.2/24 # Local IP for WireGuard interface
|
||||||
|
ListenPort = 51820
|
||||||
|
|
||||||
|
[Peer]
|
||||||
|
PublicKey = SERVER_PUBLIC_KEY
|
||||||
|
Endpoint = SERVER_IP:51820
|
||||||
|
AllowedIPs = 0.0.0.0/0
|
||||||
|
PersistentKeepalive = 25
|
||||||
|
```
|
||||||
|
|
||||||
|
### 5. Start WireGuard
|
||||||
|
|
||||||
|
Bring up the WireGuard interface:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo wg-quick up wg0
|
||||||
|
```
|
||||||
|
|
||||||
|
Enable WireGuard at startup:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo systemctl enable wg-quick@wg0
|
||||||
|
```
|
||||||
|
|
||||||
|
### 6. Verify the Connection
|
||||||
|
|
||||||
|
Check the status of WireGuard:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo wg
|
||||||
|
```
|
||||||
|
|
||||||
|
### 7. Test the Public IP
|
||||||
|
|
||||||
|
Verify your public IP address using `curl`:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
curl ifconfig.me
|
||||||
|
```
|
||||||
|
|
||||||
|
### 8. Stop WireGuard
|
||||||
|
|
||||||
|
Bring down the interface:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo wg-quick down wg0
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Server-Side Configuration
|
||||||
|
|
||||||
|
Ensure the server has the appropriate WireGuard setup before trying to connect from the client.
|
||||||
|
|
||||||
|
### 1. Generate Server Keys
|
||||||
|
|
||||||
|
On the server, generate the private and public keys:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
wg genkey | tee server_privatekey | wg pubkey > server_publickey
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2. Set Up Server Configuration
|
||||||
|
|
||||||
|
Create and edit the WireGuard configuration file:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo nano /etc/wireguard/wg0.conf
|
||||||
|
```
|
||||||
|
|
||||||
|
Paste or modify the following configuration in the file, replacing placeholders:
|
||||||
|
|
||||||
|
```ini
|
||||||
|
[Interface]
|
||||||
|
PrivateKey = SERVER_PRIVATE_KEY
|
||||||
|
Address = 10.0.0.1/24
|
||||||
|
ListenPort = 51820
|
||||||
|
|
||||||
|
[Peer]
|
||||||
|
PublicKey = CLIENT_PUBLIC_KEY
|
||||||
|
AllowedIPs = 10.0.0.2/32
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3. Start the WireGuard Server
|
||||||
|
|
||||||
|
Start and enable the WireGuard service:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo wg-quick up wg0
|
||||||
|
sudo systemctl enable wg-quick@wg0
|
||||||
|
```
|
||||||
|
|
||||||
|
### 4. Firewall and Port Forwarding
|
||||||
|
|
||||||
|
Ensure port 51820 is open on any firewalls or routers.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
This README outlines the steps for setting up WireGuard on Debian. Adjust configurations based on your network setup and requirements.
|
||||||
63
networking/vpn-wg-site-to-vps.conf
Normal file
63
networking/vpn-wg-site-to-vps.conf
Normal file
@@ -0,0 +1,63 @@
|
|||||||
|
# Description:
|
||||||
|
# This configuration allows you to connect an OPNsense or pfSense router at home and link it to a VPS IP.
|
||||||
|
# It uses a public IP to redirect traffic to different machines behind the VPS for security reasons rather than opening ports on your home router.
|
||||||
|
|
||||||
|
# Prerequisites:
|
||||||
|
# - Have a VPS server at OVH or Ionos with Debian
|
||||||
|
# - Install https://github.com/angristan/wireguard-install and generate a client
|
||||||
|
# - Know the network interface using the 'ip a' command and replace 'ens6' with the correct interface name
|
||||||
|
# - Know the public IP address or IP addresses of the clients
|
||||||
|
|
||||||
|
# Backup the current configuration before modifying it
|
||||||
|
# Perform this backup before any modification
|
||||||
|
# cp /etc/wireguard/wg0.conf /etc/wireguard/wg0.conf.bak
|
||||||
|
|
||||||
|
# Stopping the WireGuard service before modifying the configuration
|
||||||
|
# Make sure to stop the service before modifying the configuration
|
||||||
|
# sudo systemctl stop wg-quick@wg0
|
||||||
|
|
||||||
|
# Modify the WireGuard configuration
|
||||||
|
# Open the configuration file to modify it
|
||||||
|
# nano /etc/wireguard/wg0.conf
|
||||||
|
# Delete the section between 'PrivateKey = x' and '### Client opnsense'
|
||||||
|
# Replace 'your-port' and 'your-public-ip' with actual port and IP information
|
||||||
|
|
||||||
|
# WireGuard server side configuration
|
||||||
|
[Interface]
|
||||||
|
Address = 10.66.66.1/24, fd42:42:42::1/64
|
||||||
|
ListenPort = your-port
|
||||||
|
PrivateKey = x
|
||||||
|
|
||||||
|
# iptables rules to apply after setting up the WireGuard interface
|
||||||
|
PostUp = iptables -I INPUT -p udp --dport your-port -j ACCEPT # Replace 'your-port' with actual port number
|
||||||
|
PostUp = iptables -A FORWARD -i wg0 -o ens6 -j ACCEPT
|
||||||
|
PostUp = iptables -A FORWARD -i ens6 -o wg0 -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT
|
||||||
|
PostUp = iptables -I FORWARD -i wg0 -s 10.66.66.0/24 -d 10.66.66.0/24 -j DROP
|
||||||
|
|
||||||
|
PostUp = iptables -t nat -A POSTROUTING -s 10.66.66.2/32 -o ens6 -j SNAT --to-source your-public-ip # NAT for outgoing traffic from the opnsense client
|
||||||
|
PostUp = iptables -t nat -A PREROUTING -i ens6 -d your-public-ip -p tcp --dport 80 -j DNAT --to-destination 10.66.66.2:80 # Redirect port 80 to the opnsense client
|
||||||
|
PostUp = iptables -t nat -A PREROUTING -i ens6 -d your-public-ip -p tcp --dport 443 -j DNAT --to-destination 10.66.66.2:443 # Redirect port 443 to the opnsense client
|
||||||
|
|
||||||
|
# iptables rules to remove when deleting the WireGuard interface
|
||||||
|
PostDown = iptables -D INPUT -p udp --dport your-port -j ACCEPT || true # Replace 'your-port' with actual port number
|
||||||
|
PostDown = iptables -D FORWARD -i wg0 -o ens6 -j ACCEPT || true
|
||||||
|
PostDown = iptables -D FORWARD -i ens6 -o wg0 -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT || true
PostDown = iptables -D FORWARD -i wg0 -s 10.66.66.0/24 -d 10.66.66.0/24 -j DROP || true
|
||||||
|
PostDown = iptables -t nat -D POSTROUTING -s 10.66.66.2/32 -o ens6 -j SNAT --to-source your-public-ip || true
|
||||||
|
PostDown = iptables -t nat -D PREROUTING -i ens6 -d your-public-ip -p tcp --dport 80 -j DNAT --to-destination 10.66.66.2:80 || true
|
||||||
|
PostDown = iptables -t nat -D PREROUTING -i ens6 -d your-public-ip -p tcp --dport 443 -j DNAT --to-destination 10.66.66.2:443 || true
|
||||||
|
|
||||||
|
### Client opnsense Configuration
|
||||||
|
[Peer]
|
||||||
|
PublicKey = x
|
||||||
|
PresharedKey = x
|
||||||
|
AllowedIPs = 10.66.66.2/32, fd42:42:42::2/128
|
||||||
|
|
||||||
|
# Restart the WireGuard service after modifying the configuration
|
||||||
|
# sudo systemctl restart wg-quick@wg0
|
||||||
|
# sudo systemctl status wg-quick@wg0
|
||||||
|
|
||||||
|
# OPNsense configuration:
|
||||||
|
# Follow the steps described in the OPNsense documentation:
|
||||||
|
# https://docs.opnsense.org
|
||||||
|
# Configure port forwarding from the OPNsense router to the local IP of the WireGuard client
|
||||||
|
# For example, forward the port from 192.168.1.x to [local_ip_of_wireguard_client]
|
||||||
14
readme.md
14
readme.md
@@ -1,6 +1,6 @@
|
|||||||
## Debian System Administration Scripts
|
## Debian System Administration
|
||||||
|
|
||||||
This Git repository is a collection of useful Bash scripts for system administration on Debian. It will be gradually completed with scripts designed to automate various common tasks, facilitate system management, and improve productivity.
|
This Git repository is a collection of useful Bash scripts and READMEs for system administration on Debian. It will be gradually completed.
|
||||||
|
|
||||||
### Content
|
### Content
|
||||||
|
|
||||||
@@ -12,17 +12,19 @@ The repository will be organized into several categories for easy navigation:
|
|||||||
* **Networking**
|
* **Networking**
|
||||||
* **Miscellaneous**
|
* **Miscellaneous**
|
||||||
|
|
||||||
### Usag
|
### Usage
|
||||||
|
|
||||||
Detailed instructions on how to use each script will be provided as they are added to the repository. In general, you will need to follow these basic steps:
|
Detailed instructions on how to use each script will be provided as they are added to the repository. In general, you will need to follow these basic steps:
|
||||||
|
|
||||||
1. **Make the script executable:**
|
1. **Read the script's description:** Each script will have a description at the beginning explaining its purpose and usage. Make sure to read it carefully before running the script.
|
||||||
|
|
||||||
|
2. **Make the script executable:**
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
chmod +x script_name.sh
|
chmod +x script_name.sh
|
||||||
```
|
```
|
||||||
|
|
||||||
2. **Execute the script:**
|
3. **Execute the script:**
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
./script_name.sh
|
./script_name.sh
|
||||||
@@ -34,8 +36,6 @@ Detailed instructions on how to use each script will be provided as they are add
|
|||||||
sudo ./script_name.sh
|
sudo ./script_name.sh
|
||||||
```
|
```
|
||||||
|
|
||||||
3. **Read the script's description:** Each script will have a description at the beginning explaining its purpose and usage. Make sure to read it carefully before running the script.
|
|
||||||
|
|
||||||
### Contribution
|
### Contribution
|
||||||
|
|
||||||
Contributions are welcome! If you have useful scripts to share or improvements to suggest, feel free to submit a pull request.
|
Contributions are welcome! If you have useful scripts to share or improvements to suggest, feel free to submit a pull request.
|
||||||
|
|||||||
Reference in New Issue
Block a user