diff --git a/.github/workflows/markdown-lint.yml b/.github/workflows/markdown-lint.yml
new file mode 100644
index 0000000..306957d
--- /dev/null
+++ b/.github/workflows/markdown-lint.yml
@@ -0,0 +1,19 @@
+name: Markdown lint
+
+on:
+ push:
+ branches: [ main ]
+ pull_request:
+
+jobs:
+ lint:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - uses: actions/setup-python@v4
+ with:
+ python-version: "3.11"
+ - name: Install pymarkdown
+ run: pip install pymarkdownlnt
+ - name: Run pymarkdown
+ run: pymarkdown --config .pymarkdown.json scan .
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000..21f09f5
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,12 @@
+repos:
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v4.5.0
+ hooks:
+ - id: end-of-file-fixer
+ - id: trailing-whitespace
+ - repo: https://github.com/jackdewinter/pymarkdown
+ rev: v0.9.14
+ hooks:
+ - id: pymarkdown
+ args: ["scan", "."]
+ additional_dependencies: []
diff --git a/.pymarkdown.json b/.pymarkdown.json
new file mode 100644
index 0000000..bf49074
--- /dev/null
+++ b/.pymarkdown.json
@@ -0,0 +1,12 @@
+{
+ "plugins": {
+ "md013": {
+ "enabled": false
+ },
+ "md033": {
+ "allowed_elements": [
+ "br"
+ ]
+ }
+ }
+}
diff --git a/Automation/Ansible-Beginner.md b/Automation/Ansible-Beginner.md
new file mode 100644
index 0000000..6ef12eb
--- /dev/null
+++ b/Automation/Ansible-Beginner.md
@@ -0,0 +1,113 @@
+# Learning Ansible
+
+- **Author:** nduytg
+- **Version:** 0.3
+- **Date:** 2017-04-12
+- **Tested on:** Ubuntu 16.04
+
+This quick-start shows how to install Ansible on Ubuntu and exercise it against
+lightweight LXC containers that simulate a small fleet of hosts.
+
+## Installation options
+
+### Option 1: Clone from Git
+
+```bash
+sudo apt-get install git-core
+cd ~
+git clone https://github.com/ansible/ansible
+cd ansible
+git log
+git submodule update --init --recursive
+sudo apt-get install python-jinja2 python-paramiko python-yaml sshpass
+```
+
+Activate Ansible's development environment and confirm the binary is available.
+
+```bash
+which ansible
+less ./hacking/env-setup
+source ./hacking/env-setup
+which ansible
+ansible --version
+```
+
+### Option 2: Install from apt
+
+```bash
+sudo apt-get update
+sudo apt-get install software-properties-common
+sudo apt-add-repository ppa:ansible/ansible
+sudo apt-get update
+sudo apt-get install ansible
+```
+
+## Prepare target containers with LXC
+
+Install LXC on the control node and create a few Ubuntu containers that mimic a
+small environment.
+
+```bash
+sudo apt-get update
+sudo apt-get install lxc
+lxc-ls --fancy
+```
+
+Create two web servers and one database server.
+
+```bash
+sudo lxc-create -n web1 -t ubuntu
+sudo lxc-create -n web2 -t ubuntu
+sudo lxc-create -n db1 -t ubuntu
+```
+
+List and start the containers as background services.
+
+```bash
+lxc-ls -f
+sudo lxc-start -n web1 -d
+sudo lxc-start -n web2 -d
+sudo lxc-start -n db1 -d
+```
+
+Log into a container to satisfy Ansible's prerequisites (Python 2.x and SSH).
+
+```bash
+sudo lxc-attach -n web1
+sudo apt-get install python-minimal
+exit
+```
+
+Repeat for the remaining containers as needed.
+
+## Create an inventory
+
+Build a simple inventory that groups the LXC instances into logical roles.
+
+```ini
+# inventory
+[allservers]
+10.0.3.113
+10.0.3.247
+10.0.3.95
+
+[web]
+10.0.3.247
+10.0.3.95
+
+[database]
+10.0.3.113
+```
+
+## Run ad-hoc commands
+
+Verify connectivity and perform common administrative actions.
+
+```bash
+ansible allservers -m ping -u ubuntu -i inventory
+ansible allservers -a "free -h" -u ubuntu -i inventory
+ansible allservers -m shell -a "apt list --installed | grep nginx" -u ubuntu -i inventory
+ansible allservers -a "apt-get update" -u root -i inventory
+ansible web -a "apt-get install -y nginx" -u root -i inventory
+ansible web -m service -a "name=nginx state=restarted" -u root -i inventory
+```
diff --git a/Automation/Ansible-Beginner.txt b/Automation/Ansible-Beginner.txt
deleted file mode 100644
index 805d0aa..0000000
--- a/Automation/Ansible-Beginner.txt
+++ /dev/null
@@ -1,93 +0,0 @@
-######################################
-## Learning Ansible ##
-## Author: nduytg ##
-## Version 0.3 - Date: 4/12/17 ##
-######################################
-
-# Tested on Ubuntu 16.04
-# Ansible + LXC on Ubuntu
-
-### Install Ansible ###
-## Option 1: Clone from git ##
-apt-get install git-core
-
-cd ~
-git clone https://github.com/ansible/ansible
-
-cd ansible
-git log
-git submodule update --init --recursive
-apt-get install python-jinja2 python-paramiko python-yaml sshpass
-
-which ansible
-
-less ./hacking/env-setup
-source ./hacking/env-setup
-
-which ansible
-ansible --version
-
-## Option 2: Install form Yum ##
-apt-get update
-apt-get install software-properties-common
-apt-add-repository ppa:ansible/ansible
-apt-get update
-apt-get install ansible
-
-### Create Target Machine with LXC ###
-# Simulate multiple server with LXC (Linux Container) #
-apt-get update && apt-get install lxc
-
-lxc-ls --fancy
-
-# 2 Web, 1 DB #
-lxc-create -n web1 -t ubuntu
-lxc-create -n web2 -t ubuntu
-lxc-create -n db1 -t ubuntu
-
-# List all containers
-lxc-ls -f
-
-# Start these containers as deamon processes #
-lxc-start -n web1 -d
-
-# Log into web1 container #
-lxc-attach -n web1
-
-### Prepare target machine (LXC Containers) ###
-# Requirement: Python 2 and OpenSSH
-lxc-attach -n web1
-apt-get install python-minimal
-
-### Ad-hoc commands ###
-vim inventory
-[allservers]
-10.0.3.113
-10.0.3.247
-10.0.3.95
-
-[web]
-10.0.3.247
-10.0.3.95
-
-[database]
-10.0.3.113
--------------------
-
-# Ping all servers named in inventory list
-ansible allservers -m ping -u ubuntu -i inventory
-
-# Show available memory on all servers
-ansible allservers -a "free -h" -u ubuntu -i inventory
-
-# Check if these hosts have nginx installed on
-ansible allservers -a "apt list installed | grep nginx" -u ubuntu -i inventory
-
-# Update all hosts
-ansible allservers -a "apt-get update" -u root -i inventory
-
-# Install nginx on web1 and web2
-ansible web -a "apt-get install -y nginx" -u root -i inventory
-
-# Restart nginx on web1 and web2
-ansible web -m service -a "name=nginx state=restarted" -i inventory -u root
diff --git a/Backup/backup-rsync-crontab.md b/Backup/backup-rsync-crontab.md
new file mode 100644
index 0000000..c30784b
--- /dev/null
+++ b/Backup/backup-rsync-crontab.md
@@ -0,0 +1,47 @@
+# Rsync Backup Managed by Cron
+
+- **Author:** nduytg
+- **Version:** 0.5
+- **Date:** 2017-11-21
+- **Tested on:** CentOS 7
+
+Automate incremental backups with `rsync` and `cron`. Start with a local copy,
+expand to a remote target over SSH, and finish by restarting the cron service to
+apply schedule changes.
+
+## Local dry run
+
+```bash
+sudo yum install rsync
+rsync -av --dry-run --delete /home/nduytg/source/ /home/nduytg/backup/
+```
+
+## Remote dry run over SSH
+
+```bash
+sudo yum install openssh-clients rsync
+rsync -av --dry-run --delete -e ssh /home/nduytg/source/ \
+ root@192.168.31.131:/root/backup
+```
+
+Generate SSH keys and copy the public key to the remote host to avoid
+interactive password prompts when cron executes the job.
+
+## Schedule the synchronization
+
+```bash
+sudo crontab -e
+```
+
+Example entry (run every two minutes during testing):
+
+```cron
+*/2 * * * * rsync -av --delete -e ssh /home/nduytg/source/ \
+ root@192.168.31.131:/root/backup > /dev/null 2>&1
+```
+
+Restart the cron daemon after editing the schedule.
+
+```bash
+sudo systemctl restart crond
+```
diff --git a/Backup/backup-rsync-crontab.txt b/Backup/backup-rsync-crontab.txt
deleted file mode 100644
index daee252..0000000
--- a/Backup/backup-rsync-crontab.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-#######################################
-## Backup Script with Crontab #####
-## Author: nduytg #####
-## Version 0.5 - Date: 21/11/17 #####
-#######################################
-
-# Tested on CentOS7
-
-
-### Local Backup ###
-yum install rsync
-rsync –av --dry-run --delete /home/nduytg/source/ /home/nduytg/backup/
-
-### External Backup (via SSH) ###
-yum install ssh rsync
-rsync -av --dry-run --delete -e ssh /home/nduytg/source/ root@192.168.31.131:/root/backup
-
-### Set up Private Key => Auto Login ###
-
-### Schedule backup jobs with crontab ###
-crontab -e
-
-# Backup every 2 minutes #
-*/2 * * * * rsync -av --delete -e ssh /home/nduytg/source/ root@192.168.31.131:/root/backup > /dev/null
-
-# Restart Crond #
-service crond restart
\ No newline at end of file
diff --git a/Backup/backup-script-basic.md b/Backup/backup-script-basic.md
new file mode 100644
index 0000000..2488a7c
--- /dev/null
+++ b/Backup/backup-script-basic.md
@@ -0,0 +1,87 @@
+# Rotate Backup Script (Phase 1)
+
+- **Author:** nduytg
+- **Version:** 0.0
+- **Date:** 2017-11-xx
+- **Tested on:** CentOS 7
+
+This starter script keeps three rolling daily backups by cloning the most recent
+snapshot and synchronizing changes from a source directory. It is intended to be
+run manually while iterating on the rotation logic.
+
+## Usage
+
+```bash
+./backup source_path remote_user remote_ip target_path
+```
+
+The script expects passwordless SSH access to the remote target. Update the
+`SOURCE_PATH`, `REMOTE_USER`, `REMOTE_IP`, and `TARGET_PATH` arguments to match
+your environment.
+
+## Script listing
+
+```bash
+#!/bin/bash
+set -euo pipefail
+
+usage() {
+ cat <<'USAGE'
+Welcome to the rotating backup script.
+Usage: ./backup source_path remote_user remote_ip target_path
+USAGE
+}
+
+TODAY="$(date +%Y-%m-%d)"
+YESTERDAY="$(date -d '1 day ago' +%Y-%m-%d)"
+LOG_FILE="backup_log.log"
+
+if [[ $# -lt 4 ]]; then
+ usage
+ {
+ echo "Not enough arguments";
+ echo "Exit now";
+ } >>"${LOG_FILE}"
+ exit 1
+fi
+
+SOURCE_PATH=$1
+REMOTE_USER=$2
+REMOTE_IP=$3
+TARGET_PATH=$4
+
+mkdir -p "${TARGET_PATH}"
+
+# Rotate the previous snapshots.
+rm -rf "${TARGET_PATH}/daily_backup.3"
+if [[ -d "${TARGET_PATH}/daily_backup.2" ]]; then
+ mv "${TARGET_PATH}/daily_backup.2" "${TARGET_PATH}/daily_backup.3"
+fi
+if [[ -d "${TARGET_PATH}/daily_backup.1" ]]; then
+ mv "${TARGET_PATH}/daily_backup.1" "${TARGET_PATH}/daily_backup.2"
+fi
+
+# Clone the last backup to seed today's run.
+if [[ -d "${TARGET_PATH}/${YESTERDAY}" ]]; then
+ cp -al "${TARGET_PATH}/${YESTERDAY}" "${TARGET_PATH}/${TODAY}" \
+ && echo "Linked ${YESTERDAY} -> ${TODAY}" >>"${LOG_FILE}"
+else
+ mkdir -p "${TARGET_PATH}/${TODAY}"
+fi
+
+RSYNC_CMD=(
+ rsync -av --delete -e ssh "${SOURCE_PATH}" \
+ "${REMOTE_USER}@${REMOTE_IP}:${TARGET_PATH}/${TODAY}"
+)
+
+if "${RSYNC_CMD[@]}"; then
+ echo "Date: ${TODAY} backup completed!" >>"${LOG_FILE}"
+else
+ echo "Date: ${TODAY} backup failed!" >>"${LOG_FILE}"
+fi
+
+echo "Backup completed" >>"${LOG_FILE}"
+```
+
+Adjust logging paths and retention counts to satisfy production requirements
+before automating the process with cron.
diff --git a/Backup/backup-script-basic.txt b/Backup/backup-script-basic.txt
deleted file mode 100644
index 4e54bda..0000000
--- a/Backup/backup-script-basic.txt
+++ /dev/null
@@ -1,86 +0,0 @@
-#!/bin/bash
-####################################
-## Rotate Backup Script 1 ##
-## Author: nduytg ##
-## Version 0.0 - Date: xx/11/17 ##
-####################################
-
-# Tested on CentOS7 #
-
-usage()
-{
- echo "Welcome to my backup script^^"
- echo "Please type in the arguments as follow:"
- echo "./backup source_path remote_user remote_ip target_path"
- echo ""
-}
-
-TODAY="$(date +"%Y-%m-%d")"
-LAST_DAY_BACKUP="date -d "1 day ago" +"%Y-%m-%d""
-echo "Date backup: " $TODAY >> backup_log.log
-#DATE="$(date +"%Y-%m-%d")"
-#RSYNC=/usr/bin/rsync
-#SSH=/usr/bin/ssh
-
-## Sample Comand ##
-#rsync -av --dry-run --delete -e ssh /home/nduytg/source/ root@192.168.31.131:/root/backup
-
-usage
-
-if [ $# -lt 4 ] ; then
- echo "Not enough arguments" >> backup_log.log
- echo "Exit now" >> backup_log.log
- exit
-fi
-
-echo "Test arguments variables"
-echo "$0"
-echo "$1"
-echo "$2"
-echo "$3"
-echo "$4"
-echo "$#"
-
-SOURCE_PATH=$1
-REMOTE_USER=$2
-REMOTE_IP=$3
-TARGET_PATH=$4
-
-BACKUP_CYCLE=7
-CURRENT_BACKUPS="ls -1 | wc -l"
-
-## Check target path if it exists ##
-if [ ! -d "$TARGET_PATH" ] ; then
- mkdir "$TARGET_PATH"
-fi
-
-### Rotate backups ###
-## STEP 1: Remove the oldest backup, if it is exists:
-if [ -d "$TARGET_PATH/daily_backup.3" ] ; then
- rm -rf "$TARGET_PATH/daily_backup.3"
-fi
-
-## Step 2: Shift the middle snapshots(s) back one by one, if they exists ##
-if [ -d "$TARGET_PATH/daily_backup.2" ] ; then
- mv "$TARGET_PATH/daily_backup.2" "$TARGET_PATH/daily_backup.3"
-fi
-
-if [ -d "$TARGET_PATH/daily_backup.1" ] ; then
- mv "$TARGET_PATH/daily_backup.1" "$TARGET_PATH/daily_backup.2"
-fi
-
-## Incremental Backup ##
-cp -al $TARGET_PATH/$LAST_DAY_BACKUP $TARGET_PATH/$TODAY \
- && echo Linking last backup complete >> backup_log.log \
- || echo Failed copy source last backup >> backup_log.log
-
-## Step 4: Rsync from the system into the latest backup ##
-rsync -av \
- --dry-run \
- --delete \
- -e ssh "$SOURCE_PATH" "$REMOTE_USER@$REMOTE_IP:$TARGET_PATH/$TODAY" \
- && echo "Date:" $TODAY "backup completed!" >> backup_log.log \
- || echo "Date:" $TODAY "backup failed!" >> backup_log.log \
-
-
-echo "Backup completed" >> backup_log.log
\ No newline at end of file
diff --git a/Backup/backup-script-phase-1.5.md b/Backup/backup-script-phase-1.5.md
new file mode 100644
index 0000000..62afd02
--- /dev/null
+++ b/Backup/backup-script-phase-1.5.md
@@ -0,0 +1,78 @@
+# Rotate Backup Script (Phase 1.5)
+
+- **Author:** nduytg
+- **Version:** 1.3
+- **Date:** 2017-11-23
+- **Tested on:** CentOS 7
+
+Phase 1.5 reverses the data flow: pull backups from a remote system to local
+storage. Hard links reduce disk usage by reusing unchanged files between runs.
+
+## Usage
+
+```bash
+./backup SOURCE_PATH SOURCE_USER SOURCE_IP LOCAL_PATH
+```
+
+Example:
+
+```bash
+./backup_script.sh /home/nduytg/source nduytg 192.168.31.130 /root/backup
+```
+
+## Script listing
+
+```bash
+#!/bin/bash
+set -euo pipefail
+
+usage() {
+ cat <<'USAGE'
+Usage: ./backup SOURCE_PATH SOURCE_USER SOURCE_IP LOCAL_PATH
+USAGE
+}
+
+TODAY="$(date +%Y-%m-%d_%H-%M)"
+LOG_FILE="backup_log.log"
+
+if [[ $# -lt 4 ]]; then
+ usage
+ {
+ echo "ERROR: Not enough arguments";
+ echo "Exit now";
+ } >>"${LOG_FILE}"
+ exit 1
+fi
+
+SOURCE_PATH=$1
+SOURCE_USER=$2
+SOURCE_IP=$3
+LOCAL_PATH=$4
+
+mkdir -p "${LOCAL_PATH}"
+LATEST_BACKUP="$(ls -1 "${LOCAL_PATH}" | sort -r | head -n1)"
+
+if [[ -n "${LATEST_BACKUP}" && "${LATEST_BACKUP}" != "${TODAY}" ]]; then
+ echo "Linking latest backup to save space" >>"${LOG_FILE}"
+ cp -al "${LOCAL_PATH}/${LATEST_BACKUP}" "${LOCAL_PATH}/${TODAY}" >>"${LOG_FILE}"
+else
+ mkdir -p "${LOCAL_PATH}/${TODAY}"
+fi
+
+RSYNC_CMD=(
+ rsync -av --delete -e ssh \
+ "${SOURCE_USER}@${SOURCE_IP}:${SOURCE_PATH}" \
+ "${LOCAL_PATH}/${TODAY}"
+)
+
+if "${RSYNC_CMD[@]}"; then
+ echo "Date: ${TODAY} backup completed!" >>"${LOG_FILE}"
+else
+ echo "Date: ${TODAY} backup failed!" >>"${LOG_FILE}"
+fi
+
+echo "Backup completed!"
+```
+
+Add `--dry-run` to the command array while testing, and remove it to perform
+real synchronizations.
diff --git a/Backup/backup-script-phase-1.5.txt b/Backup/backup-script-phase-1.5.txt
deleted file mode 100644
index c94dcb1..0000000
--- a/Backup/backup-script-phase-1.5.txt
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/bin/bash
-######################################
-## Rotate Backup Script Phase 1.5 ##
-## Author: nduytg ##
-## Version 1.3 - Date: 23/11/17 ##
-######################################
-
-# Tested on CentOS7 #
-
-# Backup with timestamp #
-# Simply pulling backup with rsync FROM a remote host #
-usage()
-{
- echo "Welcome to my backup script^^"
- echo "Please type in the arguments as follow:"
- echo "./backup SOURCE_PATH SOURCE_USER SOURCE_IP LOCAL_PATH"
- echo ""
-}
-# ./backup_script.sh /home/nduytg/source nduytg 192.168.31.130 /root/backup
-
-echo "-----+++Backup Task+++-----"
-TODAY="$(date +"%Y-%m-%e_%H-%M")"
-echo "Date backup: " "$TODAY" >> backup_log.log
-
-usage
-
-if [ $# -lt 4 ] ; then
- echo "ERROR: Not enough arguments" >> backup_log.log
- echo "Exit now" >> backup_log.log
- exit
-fi
-
-#root@192.168.31.131:/root/backup
-
-SOURCE_PATH=$1
-SOURCE_USER=$2
-SOURCE_IP=$3
-LOCAL_PATH=$4
-
-## Step 1: Incremental Backup ##
-LATEST_BACKUP="$(ls -1 $LOCAL_PATH | sort -r | head -n1)"
-if [ "$LATEST_BACKUP" != "" ] && [ "$LATEST_BACKUP" != "$TODAY" ] ; then
- echo "Linking latest backup to save space" >> backup_log.log
- cp -al "$LOCAL_PATH/$LATEST_BACKUP" "$LOCAL_PATH/$TODAY" >> backup_log.log
-else
- echo "No need to hardlink" >> backup_log.log
-fi
-
-## Step 2: Rsync from remote system into the latest backup ##
-rsync -av \
- --dry-run \
- --delete \
- -e ssh "$SOURCE_USER@$SOURCE_IP:$SOURCE_PATH" "$LOCAL_PATH/$TODAY"\
- && echo "Date:" "$TODAY" "backup completed!" >> backup_log.log \
- || echo "Date:" "$TODAY" "backup failed!" >> backup_log.log
-echo "Backup completed!"
\ No newline at end of file
diff --git a/Backup/backup-script-phase-1.md b/Backup/backup-script-phase-1.md
new file mode 100644
index 0000000..702a217
--- /dev/null
+++ b/Backup/backup-script-phase-1.md
@@ -0,0 +1,66 @@
+# Rotate Backup Script (Phase 1)
+
+- **Author:** nduytg
+- **Version:** 1.0
+- **Date:** 2017-11-22
+- **Tested on:** CentOS 7
+
+Phase 1 introduces timestamped backups. Each run creates a new directory on the
+remote host and synchronizes the local source using `rsync`.
+
+## Usage
+
+```bash
+./backup source_path remote_path remote_user remote_ip
+```
+
+## Script listing
+
+```bash
+#!/bin/bash
+set -euo pipefail
+
+usage() {
+ cat <<'USAGE'
+Usage: ./backup source_path remote_path remote_user remote_ip
+USAGE
+}
+
+TODAY="$(date +%Y-%m-%d_%H-%M)"
+LAST_MINUTE="$(date -d '1 minute ago' +%Y-%m-%d_%H-%M)"
+LOG_FILE="backup_log.log"
+
+if [[ $# -lt 4 ]]; then
+ usage
+ {
+ echo "ERROR: Not enough arguments";
+ echo "Exit now";
+ } >>"${LOG_FILE}"
+ exit 1
+fi
+
+SOURCE_PATH=$1
+REMOTE_PATH=$2
+REMOTE_USER=$3
+REMOTE_IP=$4
+
+RSYNC_CMD=(
+ rsync -av \
+ --rsync-path="mkdir -p ${REMOTE_PATH} && rsync" \
+ --delete \
+ -e ssh "${SOURCE_PATH}" \
+ "${REMOTE_USER}@${REMOTE_IP}:${REMOTE_PATH}/${TODAY}"
+)
+
+if "${RSYNC_CMD[@]}"; then
+ echo "Date: ${TODAY} backup completed!" >>"${LOG_FILE}"
+else
+ echo "Date: ${TODAY} backup failed!" >>"${LOG_FILE}"
+fi
+
+echo "Backup completed!"
+```
+
+Use `--link-dest` to enable incremental copies once a previous snapshot exists.
+Set `LAST_MINUTE` to the most recent directory name on the remote host before
+adding the flag.
diff --git a/Backup/backup-script-phase-1.txt b/Backup/backup-script-phase-1.txt
deleted file mode 100644
index 7c77335..0000000
--- a/Backup/backup-script-phase-1.txt
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/bin/bash
-####################################
-## Rotate Backup Script Phase 1 ##
-## Author: nduytg ##
-## Version 1.0 - Date: 22/11/17 ##
-####################################
-
-# Tested on CentOS7 #
-
-# Backup with timestamp #
-# Simply backup with rsync to a remote host #
-usage()
-{
- echo "Welcome to my backup script^^"
- echo "Please type in the arguments as follow:"
- echo "./backup source_path remote_path remote_user remote_ip"
- echo ""
-}
-
-TODAY="$(date +"%Y-%m-%e_%H-%M")"
-LAST_DAY="$(date -d "1 minutes ago" +"%Y-%m-%e_%H-%M")"
-echo $TODAY
-echo $LAST_DAY
-
-echo "Date backup: " "$TODAY" >> backup_log.log
-
-usage
-
-if [ $# -lt 4 ] ; then
- echo "ERROR: Not enough arguments" >> backup_log.log
- echo "Exit now" >> backup_log.log
- exit
-fi
-
-#root@192.168.31.131:/root/backup
-
-SOURCE_PATH=$1
-REMOTE_PATH=$2
-REMOTE_USER=$3
-REMOTE_IP=$4
-
-## Incremental backup via ssh before using Rsync ##
-#ssh $REMOTE_USER@$REMOTE_IP /bin/bash << EOF
-# LATEST_BACKUP="$(ls -1 $REMOTE_PATH | sort -r | head -n1)"
-# cp -al $REMOTE_PATH/$LATEST_BACKUP $REMOTE_PATH/$TODAY
-#EOF
-## ----------In progress------------ ##
-
-## Rsync from the system into the latest backup ##
-rsync -av \
- --rsync-path="mkdir -p "$REMOTE_PATH" && rsync" \
-# --link-dest="$REMOTE_PATH/$LAST_DAY" \
- --dry-run \
- --delete \
- -e ssh "$SOURCE_PATH" "$REMOTE_USER@$REMOTE_IP:$REMOTE_PATH/$TODAY" \
- && echo "Date:" "$TODAY" "backup completed!" >> backup_log.log \
- || echo "Date:" "$TODAY" "backup failed!" >> backup_log.log
-echo "Backup completed!"
\ No newline at end of file
diff --git a/Backup/backup-script-phase-2.md b/Backup/backup-script-phase-2.md
new file mode 100644
index 0000000..74faa12
--- /dev/null
+++ b/Backup/backup-script-phase-2.md
@@ -0,0 +1,78 @@
+# Rotate Backup Script (Phase 2)
+
+- **Author:** nduytg
+- **Version:** 1.3
+- **Date:** 2017-11-23
+- **Tested on:** CentOS 7
+
+Phase 2 enforces retention. When the number of local backups exceeds the
+configured threshold, the script archives and ships the oldest snapshot to a
+remote destination before pruning it locally.
+
+## Usage
+
+```bash
+./backup-script-phase-2.sh /root/backup root@192.168.171.130:/root/backup-san
+```
+
+## Script listing
+
+```bash
+#!/bin/bash
+set -euo pipefail
+
+usage() {
+ cat <<'USAGE'
+Usage: ./backup source_path remote_path
+USAGE
+}
+
+if [[ $# -lt 2 ]]; then
+ usage
+ exit 1
+fi
+
+SOURCE_PATH=$1
+REMOTE_PATH=$2
+MAX_BACKUPS=${MAX_BACKUPS:-7}
+LOG_FILE="rotating_log.log"
+
+NUMBER_OF_BACKUPS=$(find "${SOURCE_PATH}" -mindepth 1 -maxdepth 1 -type d | wc -l)
+
+echo "======= Processing =======" >>"${LOG_FILE}"
+echo "Timestamp: $(date +%Y-%m-%d_%H-%M)" >>"${LOG_FILE}"
+echo "Existing backups: ${NUMBER_OF_BACKUPS}" >>"${LOG_FILE}"
+echo "Max backups: ${MAX_BACKUPS}" >>"${LOG_FILE}"
+
+if (( NUMBER_OF_BACKUPS <= MAX_BACKUPS )); then
+ echo "No rotation required" >>"${LOG_FILE}"
+ exit 0
+fi
+
+while (( NUMBER_OF_BACKUPS > MAX_BACKUPS )); do
+ OLDEST_BACKUP=$(ls -1 "${SOURCE_PATH}" | sort | head -n1)
+ [[ -z "${OLDEST_BACKUP}" ]] && break
+
+ ARCHIVE="${SOURCE_PATH}/${OLDEST_BACKUP}.tar.gz"
+ tar -cvzf "${ARCHIVE}" -C "${SOURCE_PATH}" "${OLDEST_BACKUP}" >>"${LOG_FILE}"
+ rm -rf "${SOURCE_PATH}/${OLDEST_BACKUP}"
+
+ RSYNC_CMD=(
+ rsync -av --remove-source-files -e ssh "${ARCHIVE}" "${REMOTE_PATH}/"
+ )
+
+ if "${RSYNC_CMD[@]}"; then
+ echo "Archive ${ARCHIVE} transferred" >>"${LOG_FILE}"
+ else
+ echo "Archive ${ARCHIVE} transfer failed" >>"${LOG_FILE}"
+ fi
+
+ NUMBER_OF_BACKUPS=$((NUMBER_OF_BACKUPS - 1))
+ echo "Backups remaining: ${NUMBER_OF_BACKUPS}" >>"${LOG_FILE}"
+done
+
+echo "Rotation complete" >>"${LOG_FILE}"
+```
+
+Add `--dry-run` to the rsync command while testing, and point `REMOTE_PATH` to
+durable storage (for example, a SAN or object store gateway).
diff --git a/Backup/backup-script-phase-2.txt b/Backup/backup-script-phase-2.txt
deleted file mode 100644
index 6f5e83f..0000000
--- a/Backup/backup-script-phase-2.txt
+++ /dev/null
@@ -1,79 +0,0 @@
-#!/bin/bash
-####################################
-## Rotate Backup Script Phase 2 ##
-## Author: nduytg ##
-## Version 1.3 - Date: 23/11/17 ##
-####################################
-
-# Tested on CentOS7 #
-
-# Usage:
-# ./backup-script-phase-2.sh /root/backup root@192.168.171.130:/root/backup-san
-
-# Backup with timestamp #
-# Simply backup with rsync to a remote host #
-usage()
-{
- echo "Welcome to my backup script^^"
- echo "Please type in the arguments as follow:"
- echo "./backup source_path remote_path"
- echo ""
-}
-
-SOURCE_PATH=$1
-REMOTE_PATH=$2
-TODAY="Time: $(date +"%Y-%m-%e_%H-%M")"
-declare -i MAX_BACKUPS=7
-
-#echo "$TODAY"
-#LAST_DAY_BACKUP="date -d "1 day ago" +"%Y-%m-%d""
-echo "=======+++Processing+++=======" >> rotating_log.log
-echo "Timestamp: " "$TODAY" >> rotating_log.log
-
-## Sample Comand ##
-usage
-
-if [ $# -lt 2 ] ; then
- echo "ERROR: Not enough arguments" >> rotating_log.log
- echo "Exit now" >> rotating_log.log
- exit
-fi
-
-## STEP 0: Count number of backups ##
-NUMBER_OF_BACKUPs=$(ls -1 $SOURCE_PATH 2>/dev/null | wc -l)
-echo "NUMBER_OF_BACKUPs:" "$NUMBER_OF_BACKUPs" >> rotating_log.log
-echo "MAX_BACKUPS:" "$MAX_BACKUPS" >> rotating_log.log
-
-## STEP 1: Check if need to rotating backup
-if [ $NUMBER_OF_BACKUPs -le $MAX_BACKUPS ];
-then
- echo "No need to rotating backup" >> rotating_log.log
- exit 0
-else
- echo "Proceed to rotating backup" >> rotating_log.log
-fi
-
-# Check if number of backup > maximum backups
-while [ $NUMBER_OF_BACKUPs -gt $MAX_BACKUPS ]
-do
- echo "Archive old backups" >> rotating_log.log
- OLDEST_BACKUP="$(ls -1 $SOURCE_PATH | sort | head -n1)"
- echo "OLDEST_BACKUP: " "$OLDEST_BACKUP" >> rotating_log.log
- tar -cvzf "$SOURCE_PATH/$OLDEST_BACKUP.tar.gz" "$SOURCE_PATH/$OLDEST_BACKUP/"
- # Avoid delete the main backup folder
- if [ -n "$OLDEST_BACKUP" ]; then
- rm -rf "${SOURCE_PATH:?}/$OLDEST_BACKUP"
- fi
-
- ## Step 2: Rsync to SAN ##
- rsync -av \
- --dry-run \
- --remove-source-files \
- -e ssh "$SOURCE_PATH/$OLDEST_BACKUP.tar.gz" "$REMOTE_PATH/" \
- && echo "Timestamp: $(date +"%Y-%m-%e_%H-%M")" "archive" "$SOURCE_PATH/$OLDEST_BACKUP.tar.gz" "backup completed!" >> rotating_log.log \
- || echo "Timestamp: $(date +"%Y-%m-%e_%H-%M")" "archive" "$SOURCE_PATH/$OLDEST_BACKUP.tar.gz" "backup failed!" >> rotating_log.log
- let "NUMBER_OF_BACKUPs--"
- echo "Number of backups: " "$NUMBER_OF_BACKUPs" >> rotating_log.log
-done
-
-echo "Rotating backup completed!" >> rotating_log.log
\ No newline at end of file
diff --git a/Backup/backup_basic.md b/Backup/backup_basic.md
new file mode 100644
index 0000000..e903260
--- /dev/null
+++ b/Backup/backup_basic.md
@@ -0,0 +1,51 @@
+# Backup Script with Cron
+
+- **Author:** nduytg
+- **Version:** 0.5
+- **Date:** 2017-11-21
+- **Tested on:** CentOS 7
+
+Use `rsync` to protect local data and push a copy to a remote server on a
+schedule managed by `cron`.
+
+## Local backup
+
+Install `rsync` and run a dry-run to validate which files will be synchronized.
+
+```bash
+sudo yum install rsync
+rsync -av --dry-run --delete /home/nduytg/source/ /home/nduytg/backup/
+```
+
+## Remote backup over SSH
+
+Install the SSH client and `rsync`, then copy the data to a remote host. Running
+with `--dry-run` ensures the command behaves as expected before removing the
+flag for production use.
+
+```bash
+sudo yum install openssh-clients rsync
+rsync -av --dry-run --delete -e ssh /home/nduytg/source/ \
+ root@192.168.31.131:/root/backup
+```
+
+Set up SSH key-based authentication between the control node and the backup
+server so that the cron job can run unattended.
+
+## Schedule the backup
+
+Edit the root user's crontab and create a frequent job while testing.
+
+```bash
+sudo crontab -e
+```
+
+Example entry to synchronize the remote backup every two minutes:
+
+```cron
+*/2 * * * * rsync -av --delete -e ssh /home/nduytg/source/ \
+ root@192.168.31.131:/root/backup > /dev/null 2>&1
+```
+
+Adjust the interval to match production requirements once verification is
+complete.
diff --git a/Backup/backup_basic.txt b/Backup/backup_basic.txt
deleted file mode 100644
index 740e5aa..0000000
--- a/Backup/backup_basic.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-#######################################
-## Backup Script with Crontab #####
-## Author: nduytg #####
-## Version 0.5 - Date: 21/11/17 #####
-#######################################
-
-# Tested on CentOS7
-
-
-### Local Backup ###
-yum install rsync
-rsync –av --dry-run --delete /home/nduytg/source/ /home/nduytg/backup/
-
-### External Backup (via SSH) ###
-yum install ssh rsync
-rsync -av --dry-run --delete -e ssh /home/nduytg/source/ root@192.168.31.131:/root/backup
-
-### Set up Private Key => Auto Login ###
-
-### Schedule backup jobs with crontab ###
-crontab -e
-
-# Backup every 2 minutes #
-*/2 * * * * rsync -av --delete -e ssh /home/nduytg/source/ root@192.168.31.131:/root/backup > /dev/null
diff --git a/Docker/Docker-Guide-Part-1.md b/Docker/Docker-Guide-Part-1.md
new file mode 100644
index 0000000..3e31552
--- /dev/null
+++ b/Docker/Docker-Guide-Part-1.md
@@ -0,0 +1,65 @@
+# Docker Guide Part 1: Install Docker Engine
+
+- **Author:** nduytg
+- **Version:** 1.0
+- **Date:** 2018-01-02
+- **Tested on:** CentOS 7
+
+This section follows the official Docker Engine installation steps for CentOS 7.
+
+## Requirements
+
+- 64-bit operating system
+- Linux kernel 3.10 or later
+
+Reference documentation:
+
+- <https://docs.docker.com/engine/installation/linux/docker-ce/centos/>
+- <https://docs.docker.com/get-started/>
+
+## Remove legacy versions
+
+```bash
+sudo yum remove docker docker-common docker-selinux docker-engine
+sudo rm -rf /var/lib/docker
+```
+
+## Configure the Docker repository
+
+Install prerequisites and enable the stable repository. Uncomment the final two
+commands if you wish to opt in to the edge or test channels.
+
+```bash
+sudo yum install -y yum-utils device-mapper-persistent-data lvm2
+sudo yum-config-manager --add-repo \
+ https://download.docker.com/linux/centos/docker-ce.repo
+# sudo yum-config-manager --enable docker-ce-edge
+# sudo yum-config-manager --enable docker-ce-test
+```
+
+## Install and start Docker
+
+```bash
+sudo yum install -y docker-ce
+sudo systemctl enable --now docker
+```
+
+## Verify the installation
+
+```bash
+docker --version
+docker run hello-world
+docker images
+docker search centos
+docker pull centos:centos6
+docker pull centos
+docker inspect centos
+docker inspect hello-world
+docker ps
+docker ps -a
+docker run -it centos:latest
+docker run -d centos:latest
+```
+
+Use `docker run IMAGE` (with either the image name or ID) to start additional
+containers as required.
diff --git a/Docker/Docker-Guide-Part-1.txt b/Docker/Docker-Guide-Part-1.txt
deleted file mode 100644
index 2e45e25..0000000
--- a/Docker/Docker-Guide-Part-1.txt
+++ /dev/null
@@ -1,74 +0,0 @@
-##############################################
-## Docker Guide - Part 1: Install Docker ##
-## Author: nduytg ##
-## Version 1.0 - Date: 2/1/18 ##
-##############################################
-
-# Tested on CentOS 7
-
-# Reference
-# https://docs.docker.com/engine/installation/linux/docker-ce/centos/
-# https://docs.docker.com/get-started/
-
-### Docker requirements ###
-# Kernel version 3.10 or higher (CentOS7, Ubuntu 16,...)
-# 64-bit OS
-
-##### Remove old version of docker #####
-yum remove docker docker-common docker-selinux docker-engine
-
-# Remove all old images, containers and volumes
-rm -rf /var/lib/docker
-
-##### Set up Docker Repository #####
-# Install prerequisite packages
-yum install -y yum-utils device-mapper-persistent-data lvm2
-
-# Enable "stable" docker
-yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
-
-# Enable the "edge" and "test" repo for experiment
-#yum-config-manager --enable docker-ce-edge
-#yum-config-manager --enable docker-ce-test
-
-##### Install Docker #####
-yum install docker-ce
-
-# Start Docker
-systemctl start docker
-systemctl enable docker
-
-# Check Docker version
-[root@centos-docker ~]# docker --version
-Docker version 17.12.0-ce, build c97c6d6
-
-# Run hello-world image on Docker
-docker run hello-world
-
-# List local images
-docker images
-
-# Searching images (on default registry)
-docker search centos
-# Pull specific images
-docker pull centos:centos6
-docker pull centos
-docker images
-
-docker run [name|image-id]
-
-# Inspect local images
-docker inspect centos
-docker inspect hello-world
-
-# List running containers #
-docker ps
-
-# List all containers
-docker ps -a
-
-# run docker interactive, terminal
-docker run -it centos:latest
-
-# run docker as deamon
-docker run -d centos:lastest
diff --git a/Docker/Docker-Guide-Part2.md b/Docker/Docker-Guide-Part2.md
new file mode 100644
index 0000000..d0d2c7f
--- /dev/null
+++ b/Docker/Docker-Guide-Part2.md
@@ -0,0 +1,93 @@
+# Docker Guide Part 2: Build and Share Containers
+
+- **Author:** nduytg
+- **Version:** 1.0
+- **Date:** 2018-01-03
+- **Tested on:** CentOS 7
+
+This lab builds a simple Python/Flask container, publishes it to Docker Hub, and
+runs it locally.
+
+## Prerequisites
+
+- Docker Engine 1.13 or later (see Part 1)
+- Docker Hub account for pushing images
+
+## Create a working directory
+
+```bash
+mkdir -p ~/docker-lab
+cd ~/docker-lab
+```
+
+## Dockerfile
+
+```dockerfile
+FROM python:2.7-slim
+WORKDIR /app
+ADD . /app
+RUN pip install --trusted-host pypi.python.org -r requirements.txt
+EXPOSE 80
+ENV NAME World
+CMD ["python", "app.py"]
+```
+
+## Application code (`app.py`)
+
+```python
+from flask import Flask
+from redis import Redis, RedisError
+import os
+import socket
+
+redis = Redis(host="redis", db=0, socket_connect_timeout=2, socket_timeout=2)
+app = Flask(__name__)
+
+@app.route("/")
+def hello():
+ try:
+ visits = redis.incr("counter")
+ except RedisError:
+ visits = "cannot connect to Redis, counter disabled"
+
+    html = "<h3>Hello {name}!</h3>" \
+        "<b>Hostname:</b> {hostname}<br/>" \
+        "<b>Visits:</b> {visits}"
+ return html.format(
+ name=os.getenv("NAME", "world"),
+ hostname=socket.gethostname(),
+ visits=visits,
+ )
+
+if __name__ == "__main__":
+ app.run(host="0.0.0.0", port=80)
+```
+
+## Dependencies (`requirements.txt`)
+
+```text
+Flask
+Redis
+```
+
+## Build and run locally
+
+```bash
+docker build -t hello1 .
+docker images
+docker run -d -p 4000:80 hello1
+docker container ls
+docker container stop <container-id>
+```
+
+## Publish to Docker Hub
+
+```bash
+docker login
+docker tag hello1 nduytg/get-started:part2
+docker images
+docker push nduytg/get-started:part2
+docker run nduytg/get-started:part2
+```
+
+Replace `nduytg` with your Docker Hub username when tagging and pushing images.
diff --git a/Docker/Docker-Guide-Part2.txt b/Docker/Docker-Guide-Part2.txt
deleted file mode 100644
index aeba395..0000000
--- a/Docker/Docker-Guide-Part2.txt
+++ /dev/null
@@ -1,103 +0,0 @@
-##############################################
-## Docker Guide - Part 2: Containers ##
-## Author: nduytg ##
-## Version 1.0 - Date: 3/1/18 ##
-##############################################
-
-# Tested on CentOS 7
-
-# Reference
-# https://docs.docker.com/engine/installation/linux/docker-ce/centos/
-# https://docs.docker.com/get-started/
-
-# Prerequisites
-# Install Docker version 1.13 or higher.
-# Read the orientation in Part 1.
-
-### Define a container with Dockerfile ###
-mkdir ~/docker-lab
-cd ~/docker-lab
-
-vi Dockerfile
-#------------Content------------
-# Use an official Python runtime as a parent image
-FROM python:2.7-slim
-
-# Set the working directory to /app
-WORKDIR /app
-
-# Copy the current directory contents into the container at /app
-ADD . /app
-
-# Install any needed packages specified in requirements.txt
-RUN pip install --trusted-host pypi.python.org -r requirements.txt
-
-# Make port 80 available to the world outside this container
-EXPOSE 80
-
-# Define environment variable
-ENV NAME World
-
-# Run app.py when the container launches
-CMD ["python", "app.py"]
-#-------------------------------
-
-### Create the app ###
-vi app.py
-#------------Content------------
-from flask import Flask
-from redis import Redis, RedisError
-import os
-import socket
-
-# Connect to Redis
-redis = Redis(host="redis", db=0, socket_connect_timeout=2, socket_timeout=2)
-
-app = Flask(__name__)
-
-@app.route("/")
-def hello():
- try:
- visits = redis.incr("counter")
- except RedisError:
- visits = "cannot connect to Redis, counter disabled"
-
- html = "Hello {name}!
" \
- "Hostname: {hostname}
" \
- "Visits: {visits}"
- return html.format(name=os.getenv("NAME", "world"), hostname=socket.gethostname(), visits=visits)
-
-if __name__ == "__main__":
- app.run(host='0.0.0.0', port=80)
-#-------------------------------
-
-
-### List requirements ###
-vi requirements.txt
-#------------Content------------
-Flask
-Redis
-#-------------------------------
-
-
-### Build the app ###
-docker build -t hello1 .
-docker images
-
-docker container ls
-
-
-docker container stop
-
-
-### Share your image ###
-# Create Docker account on cloud.docker.com
-docker login
-
-docker tag hello1 nduytg/get-started:part2
-
-docker images
-
-docker push nduytg/get-started:part2
-
-docker run nduytg/get-started:part2
\ No newline at end of file
diff --git a/Docker/Docker-Guide-Part3.md b/Docker/Docker-Guide-Part3.md
new file mode 100644
index 0000000..5a961d3
--- /dev/null
+++ b/Docker/Docker-Guide-Part3.md
@@ -0,0 +1,64 @@
+# Docker Guide Part 3: Services
+
+- **Author:** nduytg
+- **Version:** 1.0
+- **Date:** 2018-01-03
+- **Tested on:** CentOS 7
+
+Deploy a replicated service using Docker Swarm and Docker Compose.
+
+## Prerequisites
+
+Install Docker Compose 1.18.0 (or newer):
+
+```bash
+sudo curl -L \
+ https://github.com/docker/compose/releases/download/1.18.0/docker-compose-"$(uname -s)"-"$(uname -m)" \
+ -o /usr/local/bin/docker-compose
+sudo chmod +x /usr/local/bin/docker-compose
+docker-compose --version
+```
+
+Ensure Docker Engine is installed and Swarm mode is available.
+
+## Compose file
+
+```yaml
+version: "3"
+services:
+ web:
+ image: nduytg/get-started:part2
+ deploy:
+ replicas: 5
+ resources:
+ limits:
+ cpus: "0.1"
+ memory: 50M
+ restart_policy:
+ condition: on-failure
+ ports:
+ - "80:80"
+ networks:
+ - webnet
+networks:
+ webnet:
+```
+
+Replace the image reference with your Docker Hub repository.
+
+## Deploy the stack
+
+```bash
+docker swarm init
+docker stack deploy -c docker-compose.yml getstartedlab
+docker service ls
+docker service ps getstartedlab_web
+for i in {1..5}; do curl -4 http://localhost; done
+```
+
+## Tear everything down
+
+```bash
+docker stack rm getstartedlab
+docker swarm leave --force
+```
diff --git a/Docker/Docker-Guide-Part3.txt b/Docker/Docker-Guide-Part3.txt
deleted file mode 100644
index c87feeb..0000000
--- a/Docker/Docker-Guide-Part3.txt
+++ /dev/null
@@ -1,59 +0,0 @@
-##############################################
-## Docker Guide - Part 3: Services ##
-## Author: nduytg ##
-## Version 1.0 - Date: 3/1/18 ##
-##############################################
-
-# Tested on CentOS 7
-
-# Reference
-# https://docs.docker.com/engine/installation/linux/docker-ce/centos/
-# https://docs.docker.com/get-started/
-# https://docs.docker.com/compose/install/
-
-### Prerequisites ###
-# Install Docker Compose 1.18.0
-curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
-
-chmod +x /usr/local/bin/docker-compose
-
-docker-compose --version
-
-### docker-compose.yml file ###
-vi docker-compose.yml
-#------------Content------------
-version: "3"
-services:
- web:
- # replace username/repo:tag with your name and image details
- image: nduytg/get-started:part2
- deploy:
- replicas: 5
- resources:
- limits:
- cpus: "0.1"
- memory: 50M
- restart_policy:
- condition: on-failure
- ports:
- - "80:80"
- networks:
- - webnet
-networks:
- webnet:
-#-------------------------------
-
-
-### Run new load-balanced app ##
-docker swarm init
-docker stack deploy -c docker-compose.yml getstartedlab
-docker service ls
-docker service ps getstartedlab_web
-
-for i in {1..5} ; do curl -4 http://localhost ; done
-
-### Take down the app and the swarm ###
-docker stack rm getstartedlab
-
-docker swarm leave --force
-
diff --git a/Docker/Docker-Guide-Part4.md b/Docker/Docker-Guide-Part4.md
new file mode 100644
index 0000000..b103baa
--- /dev/null
+++ b/Docker/Docker-Guide-Part4.md
@@ -0,0 +1,80 @@
+# Docker Guide Part 4: Swarm
+
+- **Author:** nduytg
+- **Version:** 1.0
+- **Date:** 2018-01-03
+- **Tested on:** CentOS 7
+
+Use Docker Machine and VirtualBox to provision a two-node Swarm cluster, deploy a
+stack, and tear it down.
+
+## Prerequisites
+
+Install Docker Machine:
+
+```bash
+sudo curl -L \
+ https://github.com/docker/machine/releases/download/v0.13.0/docker-machine-"$(uname -s)"-"$(uname -m)" \
+ -o /usr/local/bin/docker-machine
+sudo chmod +x /usr/local/bin/docker-machine
+docker-machine version
+```
+
+Install VirtualBox and its dependencies:
+
+```bash
+cd /etc/yum.repos.d
+sudo wget http://download.virtualbox.org/virtualbox/rpm/rhel/virtualbox.repo
+sudo yum --enablerepo=epel install -y dkms
+sudo yum groupinstall -y "Development Tools"
+sudo yum install -y kernel-devel
+sudo yum install -y VirtualBox-5.2
+sudo /sbin/vboxconfig
+```
+
+## Provision the Swarm
+
+```bash
+docker-machine create --driver virtualbox myvm1
+docker-machine create --driver virtualbox myvm2
+```
+
+Initialize the cluster from the first node:
+
+```bash
+docker-machine ssh myvm1 "docker swarm init --advertise-addr <myvm1-ip>"
+```
+
+Join the worker:
+
+```bash
+docker-machine ssh myvm2 "docker swarm join --token <token> <myvm1-ip>:2377"
+```
+
+List cluster members:
+
+```bash
+docker-machine ssh myvm1 "docker node ls"
+```
+
+## Deploy a stack
+
+Configure your shell to talk to the manager and launch the stack described in
+`docker-compose.yml` from Part 3.
+
+```bash
+docker-machine env myvm1
+eval "$(docker-machine env myvm1)"
+docker stack deploy -c docker-compose.yml getstartedlab
+for i in {1..5}; do curl -4 http://<node-ip>; done
+docker stack rm getstartedlab
+eval "$(docker-machine env -u)"
+```
+
+## Manage machines
+
+```bash
+docker-machine ls
+docker-machine stop <machine-name>
+docker-machine start <machine-name>
+```
diff --git a/Docker/Docker-Guide-Part4.txt b/Docker/Docker-Guide-Part4.txt
deleted file mode 100644
index 6d4cdd6..0000000
--- a/Docker/Docker-Guide-Part4.txt
+++ /dev/null
@@ -1,84 +0,0 @@
-##############################################
-## Docker Guide - Part 4: Swarm ##
-## Author: nduytg ##
-## Version 1.0 - Date: 3/1/18 ##
-##############################################
-
-# Tested on CentOS 7
-
-# Reference
-# https://docs.docker.com/engine/installation/linux/docker-ce/centos/
-# https://docs.docker.com/get-started/
-# https://docs.docker.com/machine/install-machine/
-# https://wiki.centos.org/HowTos/Virtualization/VirtualBox
-
-##### Prerequisites #####
-### Install Docker Machine ###
-curl -L https://github.com/docker/machine/releases/download/v0.13.0/docker-machine-`uname -s`-`uname -m` >/tmp/docker-machine && \
-chmod +x /tmp/docker-machine && cp /tmp/docker-machine /usr/local/bin/docker-machine
-
-docker-machine version
-
-### Install VirtualBox ###
-cd /etc/yum.repos.d
-wget http://download.virtualbox.org/virtualbox/rpm/rhel/virtualbox.repo
-
-# Install DKMS (Dynamic Kernel Module) for VirtualBox
-yum --enablerepo=epel install dkms
-yum groupinstall "Development Tools"
-yum install kernel-devel
-
-# Install VirtualBox 5.2
-yum install VirtualBox-5.2.x86_64
-
-# Recompile kernel module
-sudo /sbin/vboxconfig
-
-### Set up your Swarm ###
-# Create a cluster #
-docker-machine create --driver virtualbox myvm1
-docker-machine create --driver virtualbox myvm2
-
-# Initialize the swarm and add nodes
-docker-machine ssh myvm1 "docker swarm init --advertise-addr "
-
-#Swarm initialized: current node (fon41sug27y9mwn3asfqo3ork) is now a manager.
-#To add a worker to this swarm, run the following command:
-#
-# docker swarm join --token SWMTKN-1-5c0ya8i59hs18zvc30z7ssnbsr3vq709edg7loc2rydia3oksf-8wu15o42wti2k1pcafk86vdvm 192.168.99.100:2377
-#
-#To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions.
-
-# Join the swarm
-docker-machine ssh myvm2 "docker swarm join --token SWMTKN-1-5c0ya8i59hs18zvc30z7ssnbsr3vq709edg7loc2rydia3oksf-8wu15o42wti2k1pcafk86vdvm 192.168.99.100:2377"
-
-# List the nodes in the swarm
-docker-machine ssh myvm1 "docker node ls"
-
-# Deploy app on the swarm cluster
-docker-machine ssh myvm1 "docker node ls"
-
-# Configure a docker-machine shell to the swarm manager
-docker-machine env myvm1
-eval $(docker-machine env myvm1)
-
-docker stack deploy -c docker-compose.yml getstartedlab
-
-# Test app
-for i in {1..5} ; do curl -4 http:// ; done
-
-# Remove the stack
-docker stack rm getstartedlab
-
-# Unsetting docker-machine shell
-eval $(docker-machine env -u)
-
-# Stop/Start docker machines
-docker-machine ls
-
-docker-machine start/stop
-
-
-
-
-
diff --git a/Docker/Docker-Guide-Part5.md b/Docker/Docker-Guide-Part5.md
new file mode 100644
index 0000000..c1d07a4
--- /dev/null
+++ b/Docker/Docker-Guide-Part5.md
@@ -0,0 +1,93 @@
+# Docker Guide Part 5: Stacks
+
+- **Author:** nduytg
+- **Version:** 1.0
+- **Date:** 2018-01-04
+- **Tested on:** CentOS 7
+
+Extend the Swarm deployment by adding a visualizer service and persistent Redis
+backend to the stack.
+
+## Start the Swarm
+
+```bash
+docker-machine ls
+docker-machine start myvm1
+docker-machine start myvm2
+docker-machine ssh myvm1 "docker node ls"
+```
+
+## Compose file with visualizer
+
+```yaml
+version: "3"
+services:
+ web:
+ image: nduytg/get-started:part2
+ deploy:
+ replicas: 5
+ restart_policy:
+ condition: on-failure
+ resources:
+ limits:
+ cpus: "0.1"
+ memory: 50M
+ ports:
+ - "80:80"
+ networks:
+ - webnet
+ visualizer:
+ image: dockersamples/visualizer:stable
+ ports:
+ - "8080:8080"
+ volumes:
+ - /var/run/docker.sock:/var/run/docker.sock
+ deploy:
+ placement:
+ constraints: [node.role == manager]
+ networks:
+ - webnet
+networks:
+ webnet:
+```
+
+## Deploy the stack
+
+```bash
+docker-machine env myvm1
+eval "$(docker-machine env myvm1)"
+docker stack deploy -c docker-compose.yml getstartedlab
+docker stack ps getstartedlab
+```
+
+Access the visualizer at `http://<node-ip>:8080`.
+
+## Add Redis for persistent state
+
+Update `docker-compose.yml` to include Redis:
+
+```yaml
+ redis:
+ image: redis
+ ports:
+ - "6379:6379"
+ volumes:
+ - /home/docker/data:/data
+ deploy:
+ placement:
+ constraints: [node.role == manager]
+ command: redis-server --appendonly yes
+ networks:
+ - webnet
+```
+
+Create the data directory on the manager and redeploy.
+
+```bash
+docker-machine ssh myvm1 "mkdir -p /home/docker/data"
+docker stack deploy -c docker-compose.yml getstartedlab
+docker service ls
+```
+
+Visit `http://<node-ip>` to validate the front-end and confirm Redis persists
+state between restarts.
diff --git a/Docker/Docker-Guide-Part5.txt b/Docker/Docker-Guide-Part5.txt
deleted file mode 100644
index 393b0ce..0000000
--- a/Docker/Docker-Guide-Part5.txt
+++ /dev/null
@@ -1,125 +0,0 @@
-##############################################
-## Docker Guide - Part 4: Stacks ##
-## Author: nduytg ##
-## Version 1.0 - Date: 4/1/18 ##
-##############################################
-
-# Tested on CentOS 7
-
-# Reference
-# https://docs.docker.com/engine/installation/linux/docker-ce/centos/
-# https://docs.docker.com/get-started/
-# https://docs.docker.com/machine/install-machine/
-
-
-##### Boot up your swarm #####
-docker-machine ls
-docker-machine start myvm1
-docker-machine start myvm2
-
-docker-machine ssh myvm1 "docker node ls"
-
-##### Edit docker-compose.yml #####
-vi docker-compose.yml
-#------------------Content------------------
-version: "3"
-services:
- web:
- # replace username/repo:tag with your name and image details
- image: nduytg/get-started:part2
- deploy:
- replicas: 5
- restart_policy:
- condition: on-failure
- resources:
- limits:
- cpus: "0.1"
- memory: 50M
- ports:
- - "80:80"
- networks:
- - webnet
- visualizer:
- image: dockersamples/visualizer:stable
- ports:
- - "8080:8080"
- volumes:
- - "/var/run/docker.sock:/var/run/docker.sock"
- deploy:
- placement:
- constraints: [node.role == manager]
- networks:
- - webnet
-networks:
- webnet:
-#---------------------------------------------
-
-# Configure a docker-machine shell to the swarm manager
-docker-machine env myvm1
-eval $(docker-machine env myvm1)
-
-# Deploy your stack
-docker stack deploy -c docker-compose.yml getstartedlab
-
-# Go to http://:8080
-# Or
-docker stack ps getstartedlab
-
-##### Update docker-compose.yml with Redis (storing app data)
-vi docker-compose.yml
-#------------------Content------------------
-version: "3"
-services:
- web:
- # replace username/repo:tag with your name and image details
- image: nduytg/get-started:part2
- deploy:
- replicas: 5
- restart_policy:
- condition: on-failure
- resources:
- limits:
- cpus: "0.1"
- memory: 50M
- ports:
- - "80:80"
- networks:
- - webnet
- visualizer:
- image: dockersamples/visualizer:stable
- ports:
- - "8080:8080"
- volumes:
- - "/var/run/docker.sock:/var/run/docker.sock"
- deploy:
- placement:
- constraints: [node.role == manager]
- networks:
- - webnet
- redis:
- image: redis
- ports:
- - "6379:6379"
- volumes:
- - /home/docker/data:/data
- deploy:
- placement:
- constraints: [node.role == manager]
- command: redis-server --appendonly yes
- networks:
- - webnet
-networks:
- webnet:
-#---------------------------------------------
-
-#Create a ./data directory on the manager:
-docker-machine ssh myvm1 "mkdir ./data"
-
-docker stack deploy -c docker-compose.yml getstartedlab
-
-docker service ls
-
-# Goto http://
-
-
-
diff --git a/Docker/[Network] Docker-Guide-Network.md b/Docker/[Network] Docker-Guide-Network.md
new file mode 100644
index 0000000..41c122c
--- /dev/null
+++ b/Docker/[Network] Docker-Guide-Network.md
@@ -0,0 +1,84 @@
+# Docker Guide: Networking
+
+- **Author:** nduytg
+- **Version:** 1.0
+- **Date:** 2018-01-09
+- **Tested on:** CentOS 7
+
+Explore Docker networking fundamentals including default bridges, user-defined
+networks, port publishing, and static IP assignments.
+
+## Inspect existing networks
+
+```bash
+docker network ls
+ip addr show
+```
+
+## Default bridge network
+
+Inspect the default bridge and run two containers that share it.
+
+```bash
+docker network inspect bridge
+docker run -itd --name container1 busybox
+docker run -itd --name container2 busybox
+docker attach container1
+ip -4 addr
+exit
+```
+
+## User-defined bridge network
+
+```bash
+docker network create --driver bridge my_bridge_net
+docker run --network my_bridge_net -itd --name container3 busybox
+docker network inspect my_bridge_net
+```
+
+## Publish container ports
+
+```bash
+docker run -d -p 80 nginx # random host port
+docker ps
+docker run -d -p 8080:80 nginx # explicit host port
+docker ps
+```
+
+## Assign static IP addresses
+
+### Docker CLI
+
+```bash
+docker network create --subnet 172.20.0.0/16 mynet123
+docker run --net mynet123 --ip 172.20.0.99 -p 8080:80 \
+  --hostname <hostname> -d nginx
+curl 172.20.0.99:80
+curl localhost:8080
+```
+
+### Docker Compose
+
+```yaml
+version: "3"
+services:
+ nginx:
+ image: nginx
+ container_name: my-nginx
+ networks:
+ mynet123:
+ ipv4_address: 192.168.0.99
+networks:
+ mynet123:
+ driver: bridge
+ ipam:
+ config:
+ - subnet: 192.168.0.0/24
+```
+
+```bash
+docker-compose up -d
+docker ps
+docker network ls
+curl 192.168.0.99
+```
diff --git a/Docker/[Network] Docker-Guide-Network.txt b/Docker/[Network] Docker-Guide-Network.txt
deleted file mode 100644
index 69ef05c..0000000
--- a/Docker/[Network] Docker-Guide-Network.txt
+++ /dev/null
@@ -1,91 +0,0 @@
-##################################
-## Docker Guide - Network ##
-## Author: nduytg ##
-## Version 1.0 - Date: 9/1/18 ##
-##################################
-
-# Tested on CentOS 7
-
-# Reference
-# https://docs.docker.com/engine/userguide/networking/
-#
-
-# List current network
-docker network ls
-
-ip addr show
-
-##### The default bridge network #####
-### Examine the bridge network
-docker network inspect bridge
-
-### Start 2 busybox container connected to the default bridge network
-docker run -itd --name=container1 busybox
-
-docker run -itd --name=container2 busybox
-
-# Examine how network looks from inside the container
-docker attach container1
-ip -4 addr
-
-
-##### User-defined networks #####
-### Bridge Network ###
-# Create your own bridge network
-docker network ls
-docker network create --driver bridge my_bridge_net
-
-docker network ls
-
-docker run --network=my_bridge_net -itd --name=container3 busybox
-
-docker network inspect my_bridge_net
-
-##### Exposing and publishing ports #####
-# Publish port 80 on nginx container to a random high port (higher than 30000)
-docker run -it -d -p 80 nginx
-docker ps
-
-# Publish port 80 on nginx container to port 8080 on host machine
-docker run -it -d -p 8080:80 nginx
-docker ps
-
-##### Set static ip for container #####
-### Option 1: From Docker CLI
-docker network create --subnet=172.20.0.0/16 mynet123
-docker run --net mynet123 --ip 172.20.0.99 -p 8080:80 --hostname -it -d nginx
-
-curl 172.20.0.99:80
-#or
-curl localhost:8080
-
-### Option 2: In docker-compose.yml
-vi docker-compose.yml
-#--------Content--------
-version: '3'
-services:
- nginx:
- image: nginx
- container_name: my-nginx
- networks:
- mynet123:
- ipv4_address: 192.168.0.99
-networks:
- mynet123:
- driver: bridge
- ipam:
- config:
- - subnet: 192.168.0.0/24
-
-#----------------------
-
-docker-compose up -d
-docker ps
-docker network ls
-curl 192.168.0.99
-
-
-
-
-
-
diff --git a/Docker/[Storage] Docker-Guide-Bind-Mount.md b/Docker/[Storage] Docker-Guide-Bind-Mount.md
new file mode 100644
index 0000000..f0e6646
--- /dev/null
+++ b/Docker/[Storage] Docker-Guide-Bind-Mount.md
@@ -0,0 +1,45 @@
+# Docker Guide: Bind Mounts
+
+- **Author:** nduytg
+- **Version:** 0.1
+- **Date:** 2018-01-05
+- **Tested on:** CentOS 7
+
+Bind mounts map a directory or file from the host into a container. They are
+ideal for development workflows and when the host needs direct control over the
+content.
+
+## Start a container with a bind mount
+
+### Using `--mount`
+
+```bash
+mkdir -p $(pwd)/site-content
+echo "Hello from bind mount" > $(pwd)/site-content/index.html
+
+docker run -d \
+ --name devtest \
+ --mount type=bind,source="$(pwd)/site-content",target=/usr/share/nginx/html \
+ -p 8080:80 nginx
+```
+
+### Using `-v`
+
+```bash
+docker run -d \
+ --name devtest-v \
+ -v "$(pwd)/site-content":/usr/share/nginx/html \
+ -p 8081:80 nginx
+```
+
+## Validate the mount
+
+```bash
+curl http://localhost:8080
+curl http://localhost:8081
+docker exec devtest ls /usr/share/nginx/html
+docker exec devtest cat /usr/share/nginx/html/index.html
+```
+
+Changes you make to the files under `site-content` on the host appear instantly
+inside both containers.
diff --git a/Docker/[Storage] Docker-Guide-Bind-Mount.txt b/Docker/[Storage] Docker-Guide-Bind-Mount.txt
deleted file mode 100644
index afe1874..0000000
--- a/Docker/[Storage] Docker-Guide-Bind-Mount.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-##################################
-## Docker Guide - Bind Mount ##
-## Author: nduytg ##
-## Version 0.1 - Date: 5/1/18 ##
-##################################
-
-# Tested on CentOS 7
-
-# Reference
-# https://docs.docker.com/engine/admin/volumes/bind-mounts/
-# http://training.play-with-docker.com/docker-volumes/
-
-
-##### Start a container with a bind mount ####
-# Option 1: --mount
-docker run -d \
- -it \
- --name devtest \
- --mount type=bind
-
-# Option 2: -v
-
-
-
-
-
-
-
-
diff --git a/Docker/[Storage] Docker-Guide-Volume.md b/Docker/[Storage] Docker-Guide-Volume.md
new file mode 100644
index 0000000..0868e38
--- /dev/null
+++ b/Docker/[Storage] Docker-Guide-Volume.md
@@ -0,0 +1,102 @@
+# Docker Guide: Volumes
+
+- **Author:** nduytg
+- **Version:** 0.9
+- **Date:** 2018-01-05
+- **Tested on:** CentOS 7
+
+Docker volumes provide persistent storage managed by the Docker engine. This
+reference covers creation, usage with containers and services, and read-only
+mounts.
+
+## Create and manage volumes
+
+```bash
+docker volume create my-vol
+docker volume ls
+docker volume inspect my-vol
+docker volume rm my-vol
+```
+
+## Attach a volume to a container
+
+### Using `--mount`
+
+```bash
+docker volume create myvol2
+docker run -d \
+ --name devtest \
+ --mount source=myvol2,target=/app \
+ nginx:latest
+```
+
+### Using `-v`
+
+```bash
+docker run -d \
+ --name devtest-v \
+ -v myvol2:/app \
+ nginx:latest
+```
+
+Inspect the container to confirm the mount point:
+
+```bash
+docker inspect devtest --format '{{ json .Mounts }}' | jq
+```
+
+Stop and remove the container when finished:
+
+```bash
+docker container stop devtest devtest-v
+docker container rm devtest devtest-v
+docker volume rm myvol2
+```
+
+## Use a volume with a Swarm service
+
+```bash
+docker volume create myvol2
+docker swarm init
+docker service create -d \
+ --replicas 4 \
+ --name devtest-service \
+ --mount source=myvol2,target=/app \
+ nginx:latest
+docker service ps devtest-service
+docker volume ls
+docker service rm devtest-service
+docker volume rm myvol2
+```
+
+## Populate a volume
+
+```bash
+docker run -d \
+ --name nginxtest \
+ --mount source=nginx-vol,destination=/usr/share/nginx/html \
+ nginx:latest
+ls /var/lib/docker/volumes/nginx-vol/_data
+docker container stop nginxtest
+docker container rm nginxtest
+docker volume rm nginx-vol
+```
+
+Repeat the same steps with the `-v` flag if you prefer the short syntax.
+
+## Read-only volumes
+
+```bash
+docker volume create nginx-vol
+docker run -d \
+ --name nginxtest \
+ --mount source=nginx-vol,destination=/usr/share/nginx/html,readonly \
+ nginx:latest
+docker inspect nginxtest --format '{{ json .Mounts }}' | jq
+docker container stop nginxtest
+docker container rm nginxtest
+docker volume rm nginx-vol
+```
+
+To use the short syntax, replace the `--mount` flag with
+`-v nginx-vol:/usr/share/nginx/html:ro`.
diff --git a/Docker/[Storage] Docker-Guide-Volume.txt b/Docker/[Storage] Docker-Guide-Volume.txt
deleted file mode 100644
index f543cfa..0000000
--- a/Docker/[Storage] Docker-Guide-Volume.txt
+++ /dev/null
@@ -1,156 +0,0 @@
-##################################
-## Docker Guide - Volume ##
-## Author: nduytg ##
-## Version 0.9 - Date: 5/1/18 ##
-##################################
-
-# Tested on CentOS 7
-
-# Reference
-# https://docs.docker.com/engine/admin/volumes/volumes/#choose-the--v-or-mount-flag
-# http://training.play-with-docker.com/docker-volumes/
-
-##### Create and manage volumes #####
-docker volume create my-vol
-
-# List volumes
-docker volume ls
-
-# Insprect a volume
-docker volume inspect my-vol
-
-# Remove a volume
-docker volume rm my-vol
-
-##### Start a container with a volume #####
-### Option 1: --mount ###
-docker run -d \
- -it \
- --name devtest \
- --mount source=myvol2,target=/app \
- nginx:latest
-
-### Option 2: -v ###
-docker run -d \
- -it \
- --name devtest \
- -v myvol2:/app \
- nginx:latest
-
-docker inspect devtest
-
-#----------------Result----------------
-"Mounts": [
- {
- "Type": "volume",
- "Name": "myvol2",
- "Source": "/var/lib/docker/volumes/myvol2/_data",
- "Destination": "/app",
- "Driver": "local",
- "Mode": "z",
- "RW": true,
- "Propagation": ""
- }
- ],
-#--------------------------------------
-
-# Stop the container
-docker container stop devtest
-docker container rm devtest
-
-# Remove the volume
-docker volume rm myvol2
-
-##### Start a service with volumes #####
-# Start a swarm
-docker swarm init
-
-# Start the service
-docker service create -d \
- --replicas=4 \
- --name devtest-service \
- --mount source=myvol2,target=/app \
- nginx:latest
-
-# Verify that the service is running
-docker service ps devtest-service
-
-# Verify if the volume exists
-docker volume ls
-
-# Remove the serivce
-docker service rm devtest-service
-
-# Verify if the volume still exists (it do!!)
-docker volume ls
-
-##### Populate a volume using a container #####
-# Option 1: --mount
-docker run -d \
- -it \
- --name=nginxtest \
- --mount source=nginx-vol,destination=/usr/share/nginx/html \
- nginx:latest
-
-# Option 2: -v
-docker run -d \
- -it \
- --name=nginxtest \
- -v nginx-vol:/usr/share/nginx/html \
- nginx:latest
-
-# Check the contents of volume
-ls /var/lib/docker/volumes/nginx-vol/_data
-
-# Clean up the container and volume
-docker container stop nginxtest
-
-docker container rm nginxtest
-
-docker volume rm nginx-vol
-
-##### Read-only volume #####
-# Option 1: --mount
-docker run -d \
- -it \
- --name=nginxtest \
- --mount source=nginx-vol,destination=/usr/share/nginx/html,readonly \
- nginx:latest
-
-# Option 2: -v
-docker run -d \
- -it \
- --name=nginxtest \
- -v nginx-vol:/usr/share/nginx/html:ro \
- nginx:latest
-
-#----------------Result----------------
-"Mounts": [
- {
- "Type": "volume",
- "Name": "nginx-vol",
- "Source": "/var/lib/docker/volumes/nginx-vol/_data",
- "Destination": "/usr/share/nginx/html",
- "Driver": "local",
- "Mode": "z",
- "RW": false,
- "Propagation": ""
- }
- ],
-#--------------------------------------
-
-# Clean up the container and volume
-docker container stop nginxtest
-
-docker container rm nginxtest
-
-docker volume rm nginx-vol
-
-##### Use a volume driver #####
-...(not yet)...
-
-
-
-
-
-
diff --git a/Docker/[Storage] Docker-Guide-tmpfs-Mount.md b/Docker/[Storage] Docker-Guide-tmpfs-Mount.md
new file mode 100644
index 0000000..1a8a7da
--- /dev/null
+++ b/Docker/[Storage] Docker-Guide-tmpfs-Mount.md
@@ -0,0 +1,44 @@
+# Docker Guide: `tmpfs` Mounts
+
+- **Author:** nduytg
+- **Version:** 0.1
+- **Date:** 2018-01-05
+- **Tested on:** CentOS 7
+
+`tmpfs` mounts keep data in memory and are ideal for ephemeral state that should
+never hit disk.
+
+## Start a container with a `tmpfs` mount
+
+### Using `--mount`
+
+```bash
+docker run -d \
+ --name tmpfs-example \
+ --mount type=tmpfs,destination=/tmp/cache,tmpfs-size=64m \
+ busybox sleep 3600
+```
+
+### Using `--tmpfs`
+
+```bash
+docker run -d \
+ --name tmpfs-short \
+ --tmpfs /tmp/cache:rw,size=64m \
+ busybox sleep 3600
+```
+
+## Validate the mount
+
+```bash
+docker exec tmpfs-example df -h /tmp/cache
+docker exec tmpfs-example sh -c 'echo data > /tmp/cache/example'
+docker exec tmpfs-example cat /tmp/cache/example
+docker exec tmpfs-example du -sh /tmp/cache
+```
+
+Stop and remove the containers when finished:
+
+```bash
+docker rm -f tmpfs-example tmpfs-short
+```
diff --git a/Docker/[Storage] Docker-Guide-tmpfs-Mount.txt b/Docker/[Storage] Docker-Guide-tmpfs-Mount.txt
deleted file mode 100644
index 07e9547..0000000
--- a/Docker/[Storage] Docker-Guide-tmpfs-Mount.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-##################################
-## Docker Guide - tmpfs Mount ##
-## Author: nduytg ##
-## Version 0.1 - Date: 5/1/18 ##
-##################################
-
-# Tested on CentOS 7
-
-# Reference
-# https://docs.docker.com/engine/admin/volumes/bind-mounts/
-# http://training.play-with-docker.com/docker-volumes/
-
-
-##### Start a container with a bind mount ####
-# Option 1: --mount
-docker run -d \
- -it \
- --name devtest \
- --mount type=bind
-
-# Option 2: -v
-
-
-
-
-
-
-
-
diff --git a/Firewall/IPtables_basic.md b/Firewall/IPtables_basic.md
new file mode 100644
index 0000000..bc1ea2b
--- /dev/null
+++ b/Firewall/IPtables_basic.md
@@ -0,0 +1,92 @@
+# Configure iptables (CentOS 7)
+
+- **Author:** nduytg
+- **Version:** 1.1
+- **Date:** 2017-11-14
+- **Tested on:** CentOS 7
+
+Lock down a host so that only SSH, HTTP, and HTTPS are permitted inbound, while
+DNS and NTP remain available for outbound lookups.
+
+## Install and enable iptables services
+
+```bash
+sudo yum install -y iptables-services
+sudo systemctl stop firewalld
+sudo systemctl disable firewalld
+sudo systemctl enable --now iptables
+sudo systemctl status iptables
+```
+
+## Inspect and reset rules
+
+```bash
+sudo iptables -L
+sudo iptables -S
+sudo iptables -F
+```
+
+## Configure inbound policy
+
+```bash
+# Allow loopback traffic
+sudo iptables -A INPUT -i lo -j ACCEPT
+
+# Permit established/related sessions and drop invalid packets
+sudo iptables -A INPUT -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
+sudo iptables -A INPUT -m conntrack --ctstate INVALID -j DROP
+
+# Allow SSH, HTTP, and HTTPS
+sudo iptables -A INPUT -i <interface> -d <server-ip> -p tcp --dport 22 -j ACCEPT
+sudo iptables -A INPUT -i <interface> -d <server-ip> -p tcp --dport 80 -j ACCEPT
+sudo iptables -A INPUT -i <interface> -d <server-ip> -p tcp --dport 443 -j ACCEPT
+
+# Optional: allow specific source ranges
+sudo iptables -A INPUT -i <interface> -m iprange --src-range 10.0.0.20-10.0.0.35 -j ACCEPT
+
+# Allow NTP responses
+sudo iptables -A INPUT -p udp --sport 123 -j ACCEPT
+
+# Default drop policy
+sudo iptables -P INPUT DROP
+```
+
+## Configure outbound policy
+
+```bash
+# Allow loopback traffic
+sudo iptables -A OUTPUT -o lo -j ACCEPT
+
+# Permit established sessions
+sudo iptables -A OUTPUT -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
+
+# Allow outbound SSH and web traffic
+sudo iptables -A OUTPUT -o <interface> -p tcp --sport 22 -j ACCEPT
+sudo iptables -A OUTPUT -o <interface> -p tcp --sport 80 -j ACCEPT
+sudo iptables -A OUTPUT -o <interface> -p tcp --sport 443 -j ACCEPT
+sudo iptables -A OUTPUT -o <interface> -p tcp --dport 80 -j ACCEPT
+sudo iptables -A OUTPUT -o <interface> -p tcp --dport 443 -j ACCEPT
+
+# Allow DNS and NTP queries
+sudo iptables -A OUTPUT -p udp --dport 53 -j ACCEPT
+sudo iptables -A OUTPUT -p udp --dport 123 -j ACCEPT
+
+# Default drop policy
+sudo iptables -P OUTPUT DROP
+```
+
+## Disable forwarding
+
+```bash
+sudo iptables -P FORWARD DROP
+```
+
+## Persist the configuration
+
+```bash
+sudo service iptables save
+# or
+sudo iptables-save | sudo tee /etc/sysconfig/iptables
+```
+
+Restore saved rules on boot with `iptables-restore` if required.
diff --git a/Firewall/IPtables_basic.txt b/Firewall/IPtables_basic.txt
deleted file mode 100644
index 19c5422..0000000
--- a/Firewall/IPtables_basic.txt
+++ /dev/null
@@ -1,105 +0,0 @@
-#################################################
- Configure IPtables
- Author: nduytg
- Version 1.1 - Date: 14/11/17
-#################################################
-
-# Tested on CentOS7
-
-# Reference
-
-# Configure IPtables to block all port except:
-# Inbound TCP 22, TCP 80, TCP 443
-# Outbound: UDP 53, UDP 123
-
-### Install IPtables ###
-yum install iptables-services
-
-### Stop FirewallD Service and Start IPtables Service ###
-systemctl stop firewalld
-systemctl disable firewalld
-systemctl start iptables
-systemctl enable iptables
-systemctl status iptables
-
-### List the currently configured iptables rules ###
-iptables -L
-iptables -S
-
-### Flush all current rules from iptables ###
-iptables -F
-
-### Allow INBOUND connections ###
-
-## Rules are evaluated in order, put busiet rules at the front!! ##
-## Accept all traffic to the looback interface, ##
-## which is necessary for many applications and services ##
-iptables -A INPUT -i lo -j ACCEPT
-
-### Stateful table ###
-## Allow traffic from existing connections or new connection related to these connections ##
-iptables -A INPUT -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-
-## Block invalid packets ##
-iptables -A INPUT -m conntrack --ctstate INVALID -j DROP
-
-## Allow inbound port 22, 80, 443 ##
-iptables -A INPUT -i -d -p tcp --dport 80 -j ACCEPT
-iptables -A INPUT -i -d -p tcp --dport 443 -j ACCEPT
-iptables -A INPUT -i -d -p tcp --dport 22 -j ACCEPT
-
-## Allow IP range
-iptables -A INPUT -i -m iprange --src-range 10.0.0.20-10.0.0.35 -j ACCEPT
-
-## Block dropped packets ##
-#iptables -A INPUT -j LOG --log-prefix "IPTables-Dropped: "
-
-## Allow DNS server (UDP/TCP) return result ##
-## If use stateless table, enable the two below ##
-#iptables -A INPUT -i -p tcp --sport 53 -m conntrack --ctstate NEW,ESTABLISHED -j ACCEPT
-#iptables -A INPUT -i -p udp --sport 53 -m conntrack --ctstate NEW,ESTABLISHED -j ACCEPT
-
-## Allow NTP Server return result ##
-iptables -A INPUT -p udp --sport 123 -j ACCEPT
-
-## Except the listed above, other connections will be dropped ##
-iptables -t filter -P INPUT DROP
---------------------------------------
-
-### Allow OUTBOUND connections ###
-## Accept all traffic to the looback interface, ##
-## which is necessary for many applications and services ##
-iptables -A OUTPUT -o lo -j ACCEPT
-
-## Allow Established outgoing connections ##
-iptables -A OUTPUT -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-
-## Allow outbound SSH, Web Traffic ##
-iptables -A OUTPUT -o -p tcp --sport 80 -j ACCEPT
-iptables -A OUTPUT -o -p tcp --sport 443 -j ACCEPT
-iptables -A OUTPUT -o -p tcp --sport 22 -j ACCEPT
-
-## Allow HTTP/HTTPS traffic to other server (yum install) ##
-iptables -A OUTPUT -o -p tcp --dport 80 -j ACCEPT
-iptables -A OUTPUT -o -p tcp --dport 443 -j ACCEPT
-
-## Allow DNS (TCP/UDP port 53), NTP (port 123) ##
-iptables -A OUTPUT -p udp --dport 53 -j ACCEPT
-iptables -A OUTPUT -p udp --dport 123 -j ACCEPT
-
-## Block dropped packets ##
-#iptables -A OUTPUT -j LOG --log-prefix "IPTables-Dropped: "
-
-## Except the listed above, other connections will be dropped ##
-iptables -t filter -P OUTPUT DROP
-
-### Block forwarding traffic ###
-# This configuration is for single host set-up, not router #
-iptables -P FORWARD DROP
-
-### Save current chain rules (persist after rebooting) ###
-systemctl iptables status
-iptables-save > /etc/sysconfig/iptables
-
-### Reload saved rules (on startup) ###
-iptables-restore < ~/ipt.rules
diff --git a/High Availability/keepalived-nginx.md b/High Availability/keepalived-nginx.md
new file mode 100644
index 0000000..af1da3e
--- /dev/null
+++ b/High Availability/keepalived-nginx.md
@@ -0,0 +1,192 @@
+# High Availability with Keepalived and NGINX
+
+- **Author:** nduytg
+- **Version:** 0.9
+- **Date:** 2017-12-12
+- **Tested on:** CentOS 7
+
+Deploy an active-active pair of NGINX servers protected by VRRP virtual IPs
+managed by Keepalived.
+
+## Installation
+
+### Option 1: YUM packages
+
+```bash
+sudo yum update -y
+sudo yum install -y gcc kernel-headers kernel-devel keepalived
+```
+
+### Option 2: Build from source
+
+```bash
+sudo yum install -y gcc kernel-headers kernel-devel
+mkdir -p ~/keepalived_source
+cd ~/keepalived_source
+wget http://keepalived.org/software/keepalived-1.3.9.tar.gz
+tar -xvzf keepalived-1.3.9.tar.gz
+cd keepalived-1.3.9
+./configure --with-kernel-dir=/lib/modules/"$(uname -r)"/build
+make -j2
+sudo make install
+```
+
+## Kernel tuning
+
+Allow the hosts to bind the floating IPs:
+
+```bash
+echo "net.ipv4.ip_nonlocal_bind = 1" | sudo tee -a /etc/sysctl.conf
+sudo sysctl -p
+```
+
+Enable IP forwarding if the servers will route traffic:
+
+```bash
+echo "net.ipv4.ip_forward = 1" | sudo tee -a /etc/sysctl.conf
+sudo sysctl -p
+```
+
+## Keepalived configuration
+
+Both servers share the same structure; priorities determine which host owns each
+virtual IP. Adjust interface names (`ens33`, `ens34`) and IP addresses to match
+your environment.
+
+### Primary server (`/etc/keepalived/keepalived.conf`)
+
+```conf
+global_defs {
+ vrrp_version 3
+}
+
+vrrp_script chk_nginx {
+ script "pidof nginx"
+ interval 2
+}
+
+vrrp_instance VIP1 {
+ interface ens34
+ state MASTER
+ priority 200
+ advert_int 1
+ virtual_router_id 11
+ unicast_src_ip 192.168.171.128
+ unicast_peer {
+ 192.168.171.129
+ }
+ virtual_ipaddress {
+ 192.168.31.10/24 dev ens33 label ens33:vip_1
+ }
+ authentication {
+ auth_type AH
+ auth_pass aabbccdd
+ }
+ track_script {
+ chk_nginx
+ }
+}
+
+vrrp_instance VIP2 {
+ interface ens34
+ state BACKUP
+ priority 100
+ advert_int 1
+ virtual_router_id 22
+ unicast_src_ip 192.168.171.128
+ unicast_peer {
+ 192.168.171.129
+ }
+ virtual_ipaddress {
+ 192.168.31.20/24 dev ens33 label ens33:vip_2
+ }
+ authentication {
+ auth_type AH
+ auth_pass aabbccdd
+ }
+ track_script {
+ chk_nginx
+ }
+}
+```
+
+### Secondary server (`/etc/keepalived/keepalived.conf`)
+
+```conf
+global_defs {
+ vrrp_version 3
+}
+
+vrrp_script chk_nginx {
+ script "pidof nginx"
+ interval 2
+}
+
+vrrp_instance VIP1 {
+ interface ens34
+ state BACKUP
+ priority 100
+ advert_int 1
+ virtual_router_id 11
+ unicast_src_ip 192.168.171.129
+ unicast_peer {
+ 192.168.171.128
+ }
+ virtual_ipaddress {
+ 192.168.31.10/24 dev ens33 label ens33:vip_1
+ }
+ authentication {
+ auth_type AH
+ auth_pass aabbccdd
+ }
+ track_script {
+ chk_nginx
+ }
+}
+
+vrrp_instance VIP2 {
+ interface ens34
+ state MASTER
+ priority 200
+ advert_int 1
+ virtual_router_id 22
+ unicast_src_ip 192.168.171.129
+ unicast_peer {
+ 192.168.171.128
+ }
+ virtual_ipaddress {
+ 192.168.31.20/24 dev ens33 label ens33:vip_2
+ }
+ authentication {
+ auth_type AH
+ auth_pass aabbccdd
+ }
+ track_script {
+ chk_nginx
+ }
+}
+```
+
+## Start Keepalived
+
+```bash
+sudo systemctl enable --now keepalived
+sudo systemctl status keepalived
+```
+
+## Validate failover
+
+```bash
+ip addr show
+tail -f /var/log/messages
+sudo tcpdump -vvv -n -i ens34 vrrp
+```
+
+Allow VRRP traffic through the local firewall:
+
+```bash
+sudo iptables -I INPUT -p vrrp -j ACCEPT
+sudo iptables -I OUTPUT -p vrrp -j ACCEPT
+```
+
+Shut down NGINX or Keepalived on one node to observe the VIP failover behavior.
diff --git a/High Availability/keepalived-nginx.txt b/High Availability/keepalived-nginx.txt
deleted file mode 100644
index f633447..0000000
--- a/High Availability/keepalived-nginx.txt
+++ /dev/null
@@ -1,237 +0,0 @@
-######################################
-## HA_Load Balance
-## keepalived + nginx
-## Author: nduytg
-## Version 0.9 - Date: 12/12/17
-######################################
-
-# Tested on CentOS 7
-# Reference
-# https://www.nginx.com/resources/admin-guide/nginx-ha-keepalived/
-# http://keepalived.readthedocs.io/en/latest/installing_keepalived.html
-# http://scale-out-blog.blogspot.com/2011/01/virtual-ip-addresses-and-their.html
-# http://www.austintek.com/LVS/LVS-HOWTO/HOWTO/LVS-HOWTO.failover.html
-
-### Option 1: Install Keepalived by yum ###
-yum update
-yum install gcc kernel-headers kernel-devel
-yum install keepalived
-
-### Option 2: Install keepalived from source ###
-# Install kernel headers, kernel-devel #
-yum install gcc kernel-headers kernel-devel
-
-cd ~
-mkdir keepalived_source
-cd keepalived_source
-wget http://keepalived.org/software/keepalived-1.3.9.tar.gz
-tar -xvzf keepalived-1.3.9.tar.gz
-
-# Compile and Install
-cd keepalived-1.3.9
-./configure --with-kernel-dir=/lib/modules/$(uname -r)/build
-
-make -j2 && make install
-
-# Create soft links
-# (not yet....)
-
----------------------------
-
-# Enable IP forwarding #
-# (if need to forward traffic between interfaces) #
-# echo 'net.ipv4.ip_forward = 1' >> /etc/sysctl.conf
-
-# Enable binding non-local IP #
-echo "net.ipv4.ip_nonlocal_bind = 1" >> /etc/sysctl.conf
-sysctl -p
-
-### Configure Keepalived service ###
-## Active - Active Mode ##
-## Primary Server ##
-vim /etc/keepalived/keepalived.conf
-------------PRIMARY SERVER------------
-global_defs {
- vrrp_version 3
-}
-
-vrrp_script chk_nginx {
- script "pidof nginx"
- interval 2
-}
-
-vrrp_instance VIP1 {
- # Primary private interface
- # specify the network interface for the instance to run on
- interface ens34
- state MASTER
- # Master priority (1-254)
- priority 200
- # VRRP sending interval
- advert_int 1
-
- virtual_router_id 11
- # Primary private IP used to communicate between peers
- unicast_src_ip 192.168.171.128
- unicast_peer {
- 192.168.171.129
- }
-
- virtual_ipaddress
- {
- 192.168.31.10/24 dev ens33 label ens33:vip_1
- }
-
- authentication
- {
- # Use IP-Sec Authentication Header
- # More secure than plain text password
- auth_type AH
- # The auth_pass will only use the first 8 characters entered.
- auth_pass aabbccdd
- }
-
- track_script
- {
- chk_nginx
- }
-}
-
-vrrp_instance VIP2 {
- # Primary private interface
- # specify the network interface for the instance to run on
- interface ens34
- state BACKUP
- # Backup priority (1-254)
- priority 100
- # VRRP sending interval
- advert_int 1
-
- virtual_router_id 22
- # Primary private IP used to communicate between peers
- unicast_src_ip 192.168.171.128
- # The auth_pass will only use the first 8 characters entered.
- unicast_peer {
- 192.168.171.129
- }
-
- virtual_ipaddress
- {
- 192.168.31.20/24 dev ens33 label ens33:vip_2
- }
-
- authentication
- {
- # Use IP-Sec Authentication Header
- # More secure than plain text password
- auth_type AH
- auth_pass aabbccdd
- }
-
- track_script
- {
- chk_nginx
- }
-}
-----------------------------------------
-
-------------SECONDARY SERVER------------
-global_defs {
- vrrp_version 3
-}
-
-vrrp_script chk_nginx {
- script "pidof nginx"
- interval 2
-}
-
-vrrp_instance VIP1 {
- # Secondary private interface
- # specify the network interface for the instance to run on
- interface ens34
- state BACKUP
- # Master priority (1-254)
- priority 100
- # VRRP sending interval
- advert_int 1
-
- virtual_router_id 11
- # Secondary private IP used to communicate between peers
- unicast_src_ip 192.168.171.129
- unicast_peer {
- 192.168.171.128
- }
-
- virtual_ipaddress
- {
- 192.168.31.10 dev ens33 label ens33:vip
- }
-
- authentication
- {
- # Use IP-Sec Authentication Header
- # More secure than plain text password
- auth_type AH
- # The auth_pass will only use the first 8 characters entered.
- auth_pass aabbccdd
- }
-
- track_script
- {
- chk_nginx
- }
-}
-
-vrrp_instance VIP2 {
- # Secondary private interface
- # specify the network interface for the instance to run on
- interface ens34
- state MASTER
- # Master priority (1-254)
- priority 200
- # VRRP sending interval
- advert_int 1
-
- virtual_router_id 22
- # Secondary private IP used to communicate between peers
- unicast_src_ip 192.168.171.129
- unicast_peer {
- 192.168.171.128
- }
-
- virtual_ipaddress
- {
- 192.168.31.20 dev ens33 label ens33:vip
- }
-
- authentication
- {
- # Use IP-Sec Authentication Header
- # More secure than plain text password
- auth_type AH
- # The auth_pass will only use the first 8 characters entered.
- auth_pass aabbccdd
- }
-
- track_script
- {
- chk_nginx
- }
-}
-----------------------------------------
-
-# Start keepalived service on both VM #
-systemctl start keepalived
-systemctl enabled keepalived
-systemctl status keepalived
-
-# Check if keepalived works #
-ip addr show
-tail -f /var/log/messages
-tcpdump -vvv -n -i ens34 vrrp
-
-# Allow VRRP traffice through iptables
-iptables -I INPUT -p vrrp -j ACCEPT
-iptables -I OUTPUT -p vrrp -j ACCEPT
-
-
diff --git a/Load Balancing/nginx-loadbalancing.md b/Load Balancing/nginx-loadbalancing.md
new file mode 100644
index 0000000..62f020e
--- /dev/null
+++ b/Load Balancing/nginx-loadbalancing.md
@@ -0,0 +1,101 @@
+# NGINX Load Balancing with Sticky Sessions
+
+- **Author:** nduytg
+- **Version:** 1.0
+- **Date:** 2017-12-08
+- **Tested on:** CentOS 7
+
+Deploy an active-active pair of NGINX load balancers fronted by Keepalived
+virtual IPs. Each node proxies traffic to its local backend and the peer node
+while honoring affinity based on a cookie.
+
+## Topology
+
+- VIP1: `192.168.31.10`
+- VIP2: `192.168.31.20`
+- Load balancer 1: `192.168.31.130`
+- Load balancer 2: `192.168.31.131`
+
+## Load balancer 1 (`/etc/nginx/nginx.conf`)
+
+```nginx
+http {
+ upstream backend_sv {
+ server 127.0.0.1:8080 max_fails=3 fail_timeout=10s;
+ server 192.168.31.131:8080 max_fails=3 fail_timeout=10s;
+ }
+
+ map $cookie_backend $sticky_backend {
+ backend_cookie1 127.0.0.1:8080;
+ backend_cookie2 192.168.31.131:8080;
+ default backend_sv;
+ }
+
+ server {
+ listen 80;
+ location / {
+ set $target http://$sticky_backend;
+ proxy_pass $target;
+ proxy_set_header Host $host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ error_page 500 502 503 504 = @backend_down;
+ }
+
+ location @backend_down {
+ proxy_pass http://backend_sv;
+ }
+ }
+}
+```
+
+## Load balancer 2 (`/etc/nginx/nginx.conf`)
+
+```nginx
+http {
+ upstream backend_sv {
+ server 127.0.0.1:8080 max_fails=3 fail_timeout=10s;
+ server 192.168.31.130:8080 max_fails=3 fail_timeout=10s;
+ }
+
+ map $cookie_backend $sticky_backend {
+ backend_cookie1 127.0.0.1:8080;
+ backend_cookie2 192.168.31.130:8080;
+ default backend_sv;
+ }
+
+ server {
+ listen 80;
+ location / {
+ set $target http://$sticky_backend;
+ proxy_pass $target;
+ proxy_set_header Host $host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ error_page 500 502 503 504 = @backend_down;
+ }
+
+ location @backend_down {
+ proxy_pass http://backend_sv;
+ }
+ }
+}
+```
+
+## Backend servers
+
+Each PHP backend sets a cookie to keep users pinned to the same node.
+
+```nginx
+server {
+ listen 80;
+ # ... other configuration ...
+ location ~ ^/.+\.php(/|$) {
+ add_header Set-Cookie "backend=backend_cookie1;Max-Age=3600";
+ # PHP-FPM proxy configuration
+ }
+}
+```
+
+On the second backend, set `backend_cookie2` in the header. Adjust cookie names
+and upstream weights to reflect your environment.
diff --git a/Load Balancing/nginx-loadbalancing.txt b/Load Balancing/nginx-loadbalancing.txt
deleted file mode 100644
index 2702aee..0000000
--- a/Load Balancing/nginx-loadbalancing.txt
+++ /dev/null
@@ -1,113 +0,0 @@
-######################################
-## HA_Load Balance ##
-## keepalived + nginx ##
-## Author: nduytg ##
-## Version 1.0 - Date: 8/12/17 ##
-######################################
-
-# Tested on CentOS 7
-# Active - Active Model
-# VIP with keepalived + Load-Balance/web with nginx
-
-# VIP1: 192.168.31.10
-# VIP2: 192.168.31.20
-# Master: 192.168.31.130
-# Secondary: 192.168.31.131
-
-### Load Balancer 1 #
-vim /etc/nginx/nginx.conf
-#-----------Load Balancer 1-----------
-http {
-
- upstream backend_sv {
- # Local server
- server 127.0.0.1:8080 max_fails=3 fail_timeout=10s;
- # Neighbor server
- server 192.168.31.131:8080 max_fails=3 fail_timeout=10s;
- }
-
- map $cookie_backend $sticky_backend {
- backend_cookie1 127.0.0.1:8080 max_fails=3 fail_timeout=10s;
- backend_cookie2 192.168.31.131:8080 max_fails=3 fail_timeout=10s;
- default backend_sv;
- }
-
- server {
- listen 80;
- location / {
- set $target http://$sticky_backend;
- proxy_pass $target;
- proxy_set_header Host $host;
- proxy_set_header X-Real-IP $remote_addr;
- proxy_set_header X-Forwared-Proto $scheme;
-
- # 50x gateway time-out
- error_page 500 502 503 504 = @backend_down;
- }
-
- location @backend_down {
- proxy_pass http://backend_sv;
- }
-
- }
-}
-#------------------------------------
-
-#-----------Load Balancer 2----------
-http {
-
- upstream backend_sv {
- # Local server
- server 127.0.0.1:8080 max_fails=3 fail_timeout=10s;
- # Neighbor server
- server 192.168.31.130:8080 max_fails=3 fail_timeout=10s;
- }
-
- map $cookie_backend $sticky_backend {
- backend_cookie1 127.0.0.1:8080;
- backend_cookie2 192.168.31.130:8080;
- default backend_sv;
- }
-
- server {
- listen 80;
- location / {
- set $target http://$sticky_backend;
- proxy_pass $target;
- proxy_set_header Host $host;
- proxy_set_header X-Real-IP $remote_addr;
- proxy_set_header X-Forwared-Proto $scheme;
-
- # 50x gateway time-out
- error_page 500 502 503 504 = @backend_down;
- }
-
- location @backend_down {
- proxy_pass http://backend_sv;
- }
-
- }
-}
-#------------------------------------
-
-#------------Web Backend 1-----------
-server {
- listen 80;
- ...
- location ~ ^/.+\.php(/|$) {
- add_header Set-Cookie "backend=backend_cookie1;Max-Age=3600";
- ...
- }
-}
-#------------------------------------
-
-#------------Web Backend 2-----------
-server {
- listen 80;
- ...
- location ~ ^/.+\.php(/|$) {
- add_header Set-Cookie "backend=backend_cookie2;Max-Age=3600";
- ...
- }
-}
-#------------------------------------
diff --git a/Load Balancing/tuning-nginx.md b/Load Balancing/tuning-nginx.md
new file mode 100644
index 0000000..ecbdfda
--- /dev/null
+++ b/Load Balancing/tuning-nginx.md
@@ -0,0 +1,72 @@
+# Tuning NGINX for Caching and Keep-Alive
+
+- **Author:** nduytg
+- **Version:** 1.07
+- **Date:** 2017-12-20
+- **Tested on:** CentOS 7
+
+Optimize NGINX as a reverse proxy with persistent upstream connections and
+microcaching.
+
+## Core settings (`nginx.conf`)
+
+```nginx
+http {
+ keepalive_timeout 120s;
+ keepalive_requests 10000;
+
+ upstream backend {
+ server 1.2.3.4:8080;
+ server 1.2.3.5:8080;
+ keepalive 64;
+ }
+
+ proxy_cache_path /tmp/nginx_cache levels=1:2 \
+ keys_zone=my_cache:10m max_size=2g inactive=60m \
+ loader_threshold=300 loader_files=200;
+
+ server {
+ listen 80;
+ location / {
+ proxy_http_version 1.1;
+ proxy_set_header Connection "";
+ proxy_set_header Accept-Encoding "";
+
+ proxy_cache my_cache;
+ proxy_cache_valid 200 302 60m;
+ proxy_cache_valid 404 5m;
+ proxy_cache_revalidate on;
+ proxy_cache_min_uses 3;
+ proxy_cache_lock on;
+ proxy_cache_lock_timeout 60s;
+ proxy_cache_background_update on;
+ proxy_cache_use_stale error timeout updating http_500 http_502 http_503 http_504;
+ proxy_ignore_headers Set-Cookie;
+ proxy_no_cache $http_x_no_cache;
+
+ proxy_pass http://backend;
+ }
+ }
+}
+```
+
+Replace `/tmp/nginx_cache` with a ramdisk such as `/dev/shm/nginx_cache` to store
+cache objects in memory.
+
+## Benchmarking
+
+Always benchmark before and after tuning to validate improvements.
+
+```bash
+# Randomized siege run (50 concurrent users for 30 seconds)
+siege -c50 -d3 -t30s -i blog.ducduy.vn
+
+# ApacheBench
+ab -n 1000 -c 100 http://blog.ducduy.vn/
+
+# Quick curl loop
+for i in {1..5}; do curl -I http://blog.ducduy.vn; done
+
+# Inspect TCP connections
+ss -tpno
+```
diff --git a/Load Balancing/tuning-nginx.txt b/Load Balancing/tuning-nginx.txt
deleted file mode 100644
index 3a84cdc..0000000
--- a/Load Balancing/tuning-nginx.txt
+++ /dev/null
@@ -1,87 +0,0 @@
-######################################
-## Tuning Nginx ##
-## Author: nduytg ##
-## Version 1.07 - Date: 20/12/17 ##
-######################################
-
-# Tested on CentOS 7
-# Reference
-# https://www.nginx.com/blog/nginx-caching-guide/
-# https://www.digitalocean.com/community/tutorials/understanding-nginx-http-proxying-load-balancing-buffering-and-caching
-# https://linode.com/docs/web-servers/nginx/configure-nginx-for-optimized-performance/
-# https://tweaked.io/guide/nginx/
-# https://www.nginx.com/blog/benefits-of-microcaching-nginx/
-# https://www.nginx.com/blog/cache-placement-strategies-nginx-plus/
-
-##### Caching + Keep-alive #####
-http {
-
- keepalive_timeout 120s;
- # Default setting is 100rq/connection
- keepalive_requests 10000;
-
- upstream {
- server 1.2.3.4:8080;
- server 1.2.3.5:8080;
-
- # keep alive connection to upstream server
- # test: ss -tpno
- keepalive 64;
-
- }
- #...
- # Caching on RAM
- #proxy_cache_path /dev/shm/nginx_cache levels=1:2 keys_zone=my_cache:10m max_size=200m inactive=60m loader_threshold=300 loader_files=200;
- # Caching on Disk
- proxy_cache_path /tmp/nginx_cache levels=1:2 keys_zone=my_cache:10m max_size=2g inactive=60m loader_threshold=300 loader_files=200;
-
- server {
- # ...
- location / {
- proxy_http_version 1.1; # Always upgrade to HTTP/1.1
- proxy_set_header Connection ""; # Enable keepalives
- proxy_set_header Accept-Encoding ""; # Optimize encoding
-
- proxy_cache my_cache;
- proxy_cache_valid 200 302 60m;
- proxy_cache_valid 404 5m;
- proxy_cache_revalidate on;
- proxy_cache_min_uses 3;
-
- ## Proxy lock ##
- # we allow only 1 req per URI to hit origin
- # in case of a cache miss
- proxy_cache_lock on;
- proxy_cache_lock_timeout 60s;
- proxy_cache_background_update on;
- proxy_cache_use_stale error timeout updating http_500 http_502 http_503 http_504;
-
- ## Proxy ignore cookies ##
- # Ignore Set-cookie header
- proxy_ignore_headers Set-Cookie;
- # Proxy request to upstream if catch X-No-Cache header
- proxy_no_cache $http_x_no_cache;
-
- ## Proxy pass to upstream ##
- proxy_pass http://my_upstream;
- }
- }
-
-}
-##### Benchmark #####
-### Benchmark your system before and after you have edited the parameters above!!! ###
-
-### Benchmark with siege tool ###
-#This tells siege to send 50 concurrent users with a random access delay
-#of 1-3 seconds for 1 minutes.
-#-i means randomize url selection and -f tells it to read from the file specified.
-siege -c50 -d3 -t30s -i blog.ducduy.vn
-
-# Benchmark with ab
-ab -n 1000 -c 100 blog.ducduy.vn/
-
-# Bench mark with curl
-for i in {1..5} ; do curl -I blog.ducduy.vn ; done
-
-#Show number of connections
-ss -tpno
diff --git a/NTP/NTP_basic.md b/NTP/NTP_basic.md
new file mode 100644
index 0000000..8d31397
--- /dev/null
+++ b/NTP/NTP_basic.md
@@ -0,0 +1,45 @@
+# Configure NTP on CentOS 7
+
+- **Author:** nduytg
+- **Version:** 1.0
+- **Date:** 2017-11-14
+- **Tested on:** CentOS 7
+
+Synchronize system time using `ntpd` or `ntpdate`.
+
+## Verify clocks
+
+```bash
+date
+hwclock
+```
+
+## NTP server configuration
+
+```bash
+sudo yum -y install ntp
+sudo sed -i 's/^server .*/server vn.pool.ntp.org/' /etc/ntp.conf
+# Allow clients on the local network
+printf '\nrestrict 10.0.0.0 mask 255.255.255.0 nomodify notrap\n' | sudo tee -a /etc/ntp.conf
+sudo systemctl enable --now ntpd
+```
+
+## NTP client options
+
+### Option 1: Run `ntpd`
+
+```bash
+sudo yum -y install ntp
+sudo bash -c 'cat <<CONF >>/etc/ntp.conf
+server <ntp_server_ip>
+CONF'
+sudo systemctl enable --now ntpd
+```
+
+### Option 2: Periodic sync with `ntpdate`
+
+```bash
+sudo yum -y install ntpdate
+sudo ntpdate -s <ntp_server_ip>
+sudo systemctl enable --now ntpdate
+```
diff --git a/NTP/NTP_basic.txt b/NTP/NTP_basic.txt
deleted file mode 100644
index a592057..0000000
--- a/NTP/NTP_basic.txt
+++ /dev/null
@@ -1,46 +0,0 @@
-###############################
-####### Configure NTP #######
-####### Author: nduytg #######
-####### Version 1.0 #######
-####### Date: 14/11/17 #######
-###############################
-
-# Tested on CentOS7
-
-# Reference
-# https://www.server-world.info/en/note?os=CentOS_7&p=ntp&f=2
-
-## Check Hardware Clock ##
-
-## Check Software/System/Local Clock ##
-date
-hwclock
-
-### NTP Server ###
-yum –y install ntp
-vi /etc/ntp.conf
-[…]
-restrict 10.0.0.0 mask 255.255.255.0 nomodify notrap
-server vn.pool.ntp.org
-
-systemctl start ntpd
-systemctl enable ntpd
-
-### NTP Client ###
-
-## Option 1: Use NTPd like Server ##
-yum –y install ntp
-vi /etc/ntp.conf
-[…]
-server
-
-systemctl start ntpd
-systemctl enable ntpd
-
-## Option 2: Use ntpdate ##
-yum -y install ntpdate
-
-ntpdate -s
-
-systemctl enable ntpdate
-systemctl start ntpdate
diff --git a/README.md b/README.md
index 6e86c83..27cf77c 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,45 @@
-# System-Engineer-Cheat-Sheets
-A collections of tutorials for junior system engineers
+# System Engineer Cheat Sheets
-Well, this repo is a collection of my tutorial as a System Engineer.
-I'm currently working with linux, nginx, docker... and I hope that I can improve this repo a lot in the near future.
+This repository curates practical, task-oriented guides for junior system
+engineers. All content now lives in Markdown with consistent metadata, headings,
+and fenced code blocks to improve readability and collaboration.
+## Repository layout
+
+Cheat sheets are organized by topic. Each directory contains one or more
+`*.md` files.
+
+- `Automation/` – Ansible primers and automation playbooks.
+- `Backup/` – Rsync- and script-based backup guides, including rotation scripts.
+- `Docker/` – Multi-part Docker tutorials plus networking and storage notes.
+- `Firewall/`, `High Availability/`, `Load Balancing/`, `NTP/`, `SSH/`,
+ `Tunning kernel/`, `Utils/`, `Web Services/` – Platform-specific references
+ for core services.
+
+## Local development
+
+1. [Install pre-commit](https://pre-commit.com/#install) if you do not already
+ have it available.
+2. Install the hooks for this repository:
+
+ ```bash
+ pre-commit install
+ ```
+
+3. Run the hooks before pushing changes:
+
+ ```bash
+ pre-commit run --all-files
+ ```
+
+## Linting
+
+Markdown linting is enforced in two ways:
+
+- A pre-commit hook runs `pymarkdown` to scan all Markdown files locally.
+- A GitHub Actions workflow (`Markdown lint`) executes the same check on every
+ push and pull request.
+
+Install the linter with `pip install pymarkdownlnt` or via `pre-commit`'s
+managed environments. Running the checks keeps the cheat sheets consistently
+formatted and ready for publishing.
diff --git a/SSH/SSH Overview.md b/SSH/SSH Overview.md
new file mode 100644
index 0000000..88688c3
--- /dev/null
+++ b/SSH/SSH Overview.md
@@ -0,0 +1,56 @@
+# Secure SSH Configuration
+
+- **Author:** nduytg
+- **Version:** 1.0
+- **Tested on:** CentOS 7
+
+Use strong algorithms, disable password authentication, and enforce key-based
+access.
+
+> Audit the configuration before and after hardening with
+> <https://github.com/nduytg/ssh-audit>.
+
+## Generate a key pair
+
+```bash
+ssh-keygen -b 4096 -t rsa
+ssh-keygen -l -f ~/.ssh/id_rsa.pub
+ssh-copy-id user@server
+```
+
+## Harden `sshd_config`
+
+```text
+Port 44444
+Protocol 2
+Ciphers aes128-ctr,aes192-ctr,aes256-ctr
+HostKeyAlgorithms ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,ssh-rsa,ssh-dss
+KexAlgorithms ecdh-sha2-nistp256,ecdh-sha2-nistp384,ecdh-sha2-nistp521,diffie-hellman-group14-sha1,diffie-hellman-group-exchange-sha256
+MACs hmac-sha2-256,hmac-sha2-512,hmac-sha1
+PermitRootLogin no
+PasswordAuthentication no
+PermitEmptyPasswords no
+AllowUsers user1 user2 nduytg
+DenyUsers user3 user4
+IgnoreRhosts yes
+MaxAuthTries 3
+MaxSessions 5
+RSAAuthentication yes
+PubkeyAuthentication yes
+AuthorizedKeysFile .ssh/authorized_keys
+ClientAliveInterval 300
+ClientAliveCountMax 0
+AllowTcpForwarding no
+AllowStreamLocalForwarding no
+GatewayPorts no
+PermitTunnel no
+```
+
+Use `Match` blocks to override settings for specific users or networks if
+required.
+
+## Apply changes
+
+```bash
+sudo systemctl restart sshd
+```
diff --git a/SSH/SSH Overview.txt b/SSH/SSH Overview.txt
deleted file mode 100644
index bc97396..0000000
--- a/SSH/SSH Overview.txt
+++ /dev/null
@@ -1,86 +0,0 @@
-################################
-##### Configure SSH #####
-##### Author: nduytg #####
-################################
-
-***Note: use this tool to audit SSH server before and after configured
-https://github.com/nduytg/ssh-audit/releases/tag/v1.7.0
-
-### Choosing an algorithm and proper key size ###
-### Generate key pair ##
-ssh-keygen -b 4096 -t rsa
-ssh-keygen -l
-
-## Copy public key to remote server
-
-
-### Server-Side Configuration ###
-vi /etc/ssh/sshd_config
-[...]
-
-#Port 22 (change to the port you want)
-Port 44444
-
-# Only use strong cipher algorithm
-# Symmetric algorithms
-Ciphers aes128-ctr,aes192-ctr,aes256-ctr
-
-# Host Key Algorithms (Key Pair Algorithms)
-HostKeyAlgorithms ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,ssh-rsa,ssh-dss
-
-# Key Exchange Algorithms
-KexAlgorithms ecdh-sha2-nistp256,ecdh-sha2-nistp384,ecdh-sha2-nistp521,diffie-hellman-group14-sha1,diffie-hellman-group-exchange-sha256
-
-# Enable message authentication code algorithms that are used for protecting data
-MACs hmac-sha2-256,hmac-sha2-512,hmac-sha1
-
-# Only use SSH Protocol 2
-Protocol 2
-
-# Not allow Password + Root login
-PermitRootLogin no
-PasswordAuthentication no
-PermitEmptyPasswords no
-
-# Allow specific users
-PermitRootLogin no
-AllowUsers user1 user2 nduytg
-DenyUsers user3 user4
-
-# Disable rhost file (access to another server base on "trust")
-IgnoreRhosts yes
-
-# Limit authentication tries
-MaxAuthTries 3
-MaxSessions 5
-
-
-# Allow RSA authentication
-RSAAuthentication yes
-PubkeyAuthentication yes
-AuthorizedKeysFile .ssh/authorized_keys
-
-# Not allow idle sessions
-ClientAliveInterval 300 # 5 minutes
-ClientAliveCountMax 0
-
-# Optional: Not allow port forwarding (avoid ssh tunnelling)
-AllowTcpForwarding no
-AllowStreamLocalForwarding no
-GatewayPorts no
-PermitTunnel no
-
-# Optional options
-# Match Address 192.168.0.1,10.10.123.0/24
-# then list out your options for these addresses like mentioned above
-
-# Match Users nduytg
-# then list out your options for these users like mentioned above
-
-----------------------
-
-# Restart sshd service
-service sshd restart
-
-
-
diff --git a/SSH/after_result.txt b/SSH/after_result.md
similarity index 98%
rename from SSH/after_result.txt
rename to SSH/after_result.md
index f75f8bd..770a6be 100644
--- a/SSH/after_result.txt
+++ b/SSH/after_result.md
@@ -1,3 +1,6 @@
+# SSH Audit Report (After Hardening)
+
+```text
[0;36m# general[0m
[0;32m(gen) banner: SSH-2.0-OpenSSH_7.4[0m
[0;32m(gen) software: OpenSSH 7.4[0m
@@ -60,3 +63,5 @@
[0;32m(rec) +hmac-sha2-512-etm@openssh.com -- mac algorithm to append [0m
[0;32m(rec) +umac-128-etm@openssh.com -- mac algorithm to append [0m
+
+```
diff --git a/SSH/before_result.txt b/SSH/before_result.md
similarity index 99%
rename from SSH/before_result.txt
rename to SSH/before_result.md
index 078771b..42ca00e 100644
--- a/SSH/before_result.txt
+++ b/SSH/before_result.md
@@ -1,3 +1,6 @@
+# SSH Audit Report (Before Hardening)
+
+```text
[0;36m# general[0m
[0;32m(gen) banner: SSH-2.0-OpenSSH_7.4[0m
[0;32m(gen) software: OpenSSH 7.4[0m
@@ -116,3 +119,5 @@
[0;33m(rec) -hmac-sha1-etm@openssh.com -- mac algorithm to remove [0m
[0;33m(rec) -umac-64-etm@openssh.com -- mac algorithm to remove [0m
+
+```
diff --git a/Tunning kernel/Max file descriptor b/Tunning kernel/Max file descriptor
deleted file mode 100644
index cfb8768..0000000
--- a/Tunning kernel/Max file descriptor
+++ /dev/null
@@ -1,75 +0,0 @@
-#############################################
-##### Configure Max File Descriptor #####
-##### Author: nduytg #####
-#############################################
-
-### Find Linux Open File Limit ###
-
-### ulimit usage ###
-ulimit -a: list one user's all resources limits
-ulimit -Sn: list one user's all soft resources limits
-ulimit -Hn: list one user's all hard resources limits
-
-### Check process max open file limit ###
-## Get PID ##
-ps aux | grep sshd
-or
-pidof sshd
-
-## Get process's limits ##
-cat /proc//limits
-
-## Check currently using FD ##
-ls /proc//fd | wc -l
-
-### Set new limits for users ###
-*** Option 1 ***
-## Set temporary values (valid in one session) ##
-# Only user root can set hard limits #
-ulimit -Sn
-sudo ulimit -Hn
-
-## Set permanent value ##
-# Edit file /etc/security/limits.conf #
-vi /etc/security/limits.conf
-[…]
-root soft nofile 32000
-root hard nofile 64000
-nduytg soft nofile 4096
-nduytg hard nofile 8192
-
-# Edit file PAM Login
-vi /etc/pam.d/login
-[…]
-session required pam_limits.so
-
-# Edit file SSHD config
-vi /etc/ssh/sshd_config
-[…]
-UsePAM yes
-
-# Reboot system
-init 6
-ulimit –n
-ulimit –Hn
-ulimit –Sn
-
-*** Option 2 ***
-prlimit --pid --nofile=:
-prlimit --pid 1036 --nofile=32000:64000
-
-### Set new limits for system wide configuration ###
-## Set temporary values (valid in one session) ##
-cat /proc/sys/fs/file-max
-sysctl -w fs.file-max=500000
-cat /proc/sys/fs/file-max
-
-
-## Set permanent value ##
-vi /etc/sysctl.conf
-[…]
-fs.file-max=700000
-
-# Reload variables
-sysctl -p
-cat /proc/sys/fs/file-max
diff --git a/Tunning kernel/Tuning Kernel.txt b/Tunning kernel/Tuning Kernel.txt
deleted file mode 100644
index 3a3f37f..0000000
--- a/Tunning kernel/Tuning Kernel.txt
+++ /dev/null
@@ -1,161 +0,0 @@
-#################################################
-####### Tunning Linux Kernel #####
-####### Author: nduytg #####
-####### Version 1.2 - Date: 9/11/17 #####
-#################################################
-
-####### Tested in CentOS7 #######
-
-# Reference
-# https://klaver.it/linux/sysctl.conf
-# https://wiki.archlinux.org/index.php/Sysctl
-# https://www.kernel.org/doc/Documentation/sysctl/
-# https://www.kernel.org/doc/Documentation/networking/ip-sysctl.txt
-# https://www.speedguide.net/articles/linux-tweaking-121
-# https://vincent.bernat.im/en/blog/2014-tcp-time-wait-state-linux
-
-## Show all parameters of Linux kernel ##
-sysctl -a
-
-#### Backup sysctl.conf before changing anything ####
-sysctl -a > /etc/sysctl_backup.conf
-
-#### Edit /etc/sysctl.conf to modify kernel parameters ####
-vi /etc/sysctl.conf
-
-###### Improving Network Performance ######
-
-#### Congestion control ####
-# Congestion control protocol can be changed between cubic and htcp (Hamilton TCP) #
-# Enable timestamps, this may cause overhead because it adds 10 bytes to each packets #
-# Enable window scaling (default = 1)
-net.ipv4.tcp_congestion_control = htcp
-net.ipv4.tcp_timestamps = 1
-net.ipv4.tcp_window_scaling = 1
-
-# Enable TCP SACK, which allow client to resend only lost packets, not all of them #
-# However, in some cases, TCP may consume more resources (CPU, RAM) and decrease network performance #
-net.ipv4.tcp_sack = 1
-
-
-#### Increase socket buffer ####
-# Increase Read Memory Buffer #
-# TCP Read Memory: Min - Default - Max #
-net.ipv4.tcp_rmem = 8192 87380 16777216
-net.ipv4.udp_rmem_min = 16384
-# Default read memory buffer of all receiving sockets (except TCP and UDP)
-net.core.rmem_default = 262144
-net.core.rmem_max = 16777216
-
-# Increase Write Memory Buffer #
-# TCP Write Memory: Min - Default - Max #
-net.ipv4.tcp_wmem = 8192 65536 16777216
-net.ipv4.udp_wmem_min = 16384
-# Default read memory buffer of all sending sockets (except TCP and UDP)
-net.core.wmem_default = 262144
-net.core.wmem_max = 16777216
-
-# Increase connection queue #
-net.core.somaxconn = 16384
-
-# Improve packet processing queue, speed #
-net.core.netdev_max_backlog = 16384
-net.core.dev_weight = 64
-
-## Improve connection tracking ##
-# For high-loaded servers #
-net.nf_conntrack_max = 100000
-#or
-net.netfilter.nf_conntrack_max = 100000
-
-# Decrease connection timeout in netfilter table #
-net.netfilter.nf_conntrack_tcp_timeout_established = 600
-
-## Improving Network Security ##
-# Prevent SYN Attack #
-net.ipv4.tcp_syncookies = 1
-net.ipv4.tcp_max_syn_backlog = 262144
-net.ipv4.tcp_syn_retries = 2
-net.ipv4.tcp_synack_retries = 2
-
-# Disable packet forwarding #
-net.ipv4.ip_forward = 0
-net.ipv4.conf.all.forwarding = 0
-net.ipv4.conf.default.forwarding = 0
-net.ipv6.conf.all.forwarding = 0
-net.ipv6.conf.default.forwarding = 0
-
-# Disable IP source routing #
-net.ipv4.conf.all.accept_source_route = 0
-net.ipv4.conf.default.accept_source_route = 0
-net.ipv6.conf.all.accept_source_route = 0
-net.ipv6.conf.default.accept_source_route = 0
-
-# Block ICMP redirect packets to prevent MITM attacks #
-net.ipv4.conf.all.accept_redirects = 0
-net.ipv4.conf.default.accept_redirects = 0
-net.ipv4.conf.all.send_redirects = 0
-net.ipv4.conf.default.send_redirects = 0
-
-#### Prevent IP spoofing ####
-# Enable reverse path filter to verify IPs #
-net.ipv4.conf.all.rp_filter = 1
-net.ipv4.conf.default.rp_filter = 1
-net.ipv4.conf.all.log_martians = 1
-net.ipv4.conf.default.log_martians = 1
-
-## Decrease TCP FIN timeout ##
-net.ipv4.tcp_fin_timeout = 7
-
-## Decrease keep alive waiting time ##
-net.ipv4.tcp_keepalive_time = 300
-net.ipv4.tcp_keepalive_probes = 5
-net.ipv4.tcp_keepalive_intvl = 15
-
-#### Configure ICMP ####
-net.ipv4.icmp_echo_ignore_all = 0
-# Avoid smurf attack #
-net.ipv4.icmp_echo_ignore_broadcasts = 1
-net.ipv4.icmp_ignore_bogus_error_responses = 1
-
-# Disable Proxy ARP #
-net.ipv4.conf.all.proxy_arp = 0
-
-# Configure local port range (only if server had a lot outbound connections) #
-net.ipv4.ip_local_port_range = 16384 65535
-
-# Protect against TIME WAIT ASSASSINATION followed up RFC 1337 #
-net.ipv4.tcp_rfc1337 = 1
-
-
-###### Filesystem Tuning ######
-## Increase open file limit ##
-# For web/database/log servers which need a lot of open files #
-fs.file-max = 300000
-
-###### Memory Tuning ######
-## Decrease swapping ##
-vm.swappiness = 10
-vm.dirty_background_ratio = 5
-vm.dirty_ratio = 10
-vm.overcommit_memory = 0
-vm.overcommit_ratio = 50
-
-###### Kernel Hardening ######
-# Prevent buffer/stack/heap exploits #
-# In CentOS7/RHEL7 exec-shield has been enabled by default and removed from /proc
-# Use this option in CentOS 6 or other distro
-#kernel.exec-shield = 1
-kernel.randomize_va_space = 2
-# kernel.pid_max = 4194303
-
-###### IPv6 ######
-# Disable IPv6 by default #
-net.ipv6.conf.all.autoconf=0
-net.ipv6.conf.all.accept_ra=0
-net.ipv6.conf.default.autoconf=0
-net.ipv6.conf.default.accept_ra=0
----------------------------------------
-
-### Reload changes ###
-sysctl -p
diff --git a/Tunning kernel/process_and_file_limits.md b/Tunning kernel/process_and_file_limits.md
new file mode 100644
index 0000000..b087cb2
--- /dev/null
+++ b/Tunning kernel/process_and_file_limits.md
@@ -0,0 +1,162 @@
+# Process and File Descriptor Limits
+
+- **Author:** System Engineer Collective
+- **Version:** 2.1
+- **Date:** 2024-05-22
+- **Tested on:** Arch Linux, Debian 12, Ubuntu 22.04
+
+Processes inherit two layers of ceilings: shell-oriented limits (soft and hard)
+and the kernel-wide maximum number of descriptors the system can allocate. Keep
+all three aligned so services scale predictably.
+
+## Understand the limit types
+
+* **Soft limit** – The active threshold enforced for a running shell or service.
+ Users can raise it up to the matching hard limit with `ulimit` or `prlimit`.
+* **Hard limit** – The upper bound a non-root user can request. Root (or a
+ systemd unit with `CAP_SYS_RESOURCE`) can raise it further.
+* **Kernel ceilings** – Apply to the entire system. File descriptors use
+ `fs.file-max` for the global pool, while `fs.nr_open` defines the highest value
+ any single process can set as its hard limit. Networking workloads may also hit
+ `net.core.somaxconn` or `net.ipv4.ip_local_port_range`.
+
+`ulimit` is a shell built-in that reads or writes the soft (`-S`) and hard (`-H`)
+limits backed by the kernel `RLIMIT_*` interface. Scripts and services that spawn
+child processes inherit whatever values the parent exported.
+
+## Check your current limits
+
+Review the shell defaults before you launch long-running services.
+
+```bash
+ulimit -a
+ulimit -Sn
+ulimit -Hn
+```
+
+Map the shell settings to the actual kernel values and current consumption by
+querying `/proc`.
+
+```bash
+pid=$(pidof sshd)
+cat /proc/$pid/limits | grep -i "max open"
+ls /proc/$pid/fd | wc -l
+cat /proc/sys/fs/file-nr
+cat /proc/sys/fs/nr_open
+```
+
+`prlimit` offers the same data with clearer output and can modify values on the
+fly. Pair it with `--resource` for fine-grained inspection.
+
+```bash
+sudo prlimit --pid $pid --resource=NOFILE
+sudo prlimit --pid $pid --nofile=65535:65535
+```
+
+For systemd-managed services, `systemctl show` exposes what the manager applied
+at runtime.
+
+```bash
+systemctl show nginx -p LimitNOFILE,LimitNPROC
+```
+
+Containers introduce another layer: cgroup controllers may cap descriptors via
+`LimitNOFILE`, `TasksMax`, or OCI runtime annotations. Use `systemd-cgls` or the
+runtime tooling (`podman inspect`, `docker inspect`) to verify no additional
+limits interfere with your tuning.
+
+## Apply temporary adjustments
+
+`ulimit` changes apply to the current shell session and any children it spawns.
+Use it for ad-hoc testing or in wrapper scripts executed by systemd units.
+
+```bash
+ulimit -n 65535
+```
+
+Modify a running service without restarting by targeting its PID with
+`prlimit`.
+
+```bash
+sudo prlimit --pid 1234 --nofile=65535:65535
+```
+
+## Persist limits across reboots
+
+### PAM limits for logins and services
+
+Persist user-based limits through `/etc/security/limits.d/*.conf` or the legacy
+`/etc/security/limits.conf`. Group entries (prefixed with `@`) keep settings in
+sync for system users such as web or database services.
+
+```conf
+@nginx soft nofile 65535
+@nginx hard nofile 65535
+```
+
+Confirm `pam_limits.so` is enabled for the relevant PAM stack (login shells,
+SSH, display managers). For SSH this means setting `UsePAM yes` in
+`/etc/ssh/sshd_config`.
+
+### Systemd units
+
+Override systemd unit files instead of editing upstream service definitions.
+
+```bash
+sudo systemctl edit nginx
+```
+
+```ini
+[Service]
+LimitNOFILE=65535
+LimitCORE=infinity
+```
+
+Reload and restart the service.
+
+```bash
+sudo systemctl daemon-reload
+sudo systemctl restart nginx
+```
+
+### Kernel-wide descriptor pool
+
+Raise the system ceiling with `sysctl` so user limits have room to grow.
+
+```bash
+sysctl fs.file-max
+sudo sysctl -w fs.file-max=500000
+sudo sysctl -w fs.nr_open=1048576
+```
+
+Make the change persistent by dropping a file under `/etc/sysctl.d/`.
+
+```bash
+sudo tee /etc/sysctl.d/80-fd.conf <<'CONF'
+fs.file-max = 500000
+fs.nr_open = 1048576
+CONF
+
+sudo sysctl --system
+```
+
+Monitor overall usage with `cat /proc/sys/fs/file-nr`. The second field shows
+how many descriptors are currently allocated, which helps you size the ceiling
+for busy hosts.
+
+```bash
+cat /proc/sys/fs/file-nr
+```
+
+## Troubleshooting tips
+
+* If limits do not take effect for SSH sessions, double-check that `sshd` was
+ restarted after editing its configuration and that no conflicting files exist
+ under `/etc/security/limits.d/`.
+* For containers, remember that cgroup controllers may impose additional limits
+ (for example, `LimitNOFILE` in the unit file that launches the container). Use
+ `systemd-cgls` or runtime-specific inspectors to confirm.
+* Applications linked against `libcap` may drop privileges after start-up. They
+ must raise their soft limit before relinquishing `CAP_SYS_RESOURCE`.
+* Track per-process usage during incidents with `sudo lsof -p $pid | wc -l` or
+ `sudo ss -tanp` for sockets so you can justify permanent limit increases.
diff --git a/Tunning kernel/tuning_kernel.md b/Tunning kernel/tuning_kernel.md
new file mode 100644
index 0000000..e826758
--- /dev/null
+++ b/Tunning kernel/tuning_kernel.md
@@ -0,0 +1,216 @@
+# Linux Kernel Tuning Essentials
+
+- **Author:** System Engineer Collective
+- **Version:** 2.1
+- **Date:** 2024-05-22
+- **Tested on:** Arch Linux, Debian 12, Ubuntu 22.04
+
+These notes summarize the day-to-day kernel tuning tasks you will meet on most
+Linux servers. They focus on practical inspection commands, temporary tweaks,
+and the persistent configuration you need so the changes survive a reboot.
+
+## Inspecting and applying kernel parameters
+
+### Discover current settings
+
+Use `sysctl` to list or query runtime kernel parameters. Pipe the full list
+through `less` or `rg` when you are hunting for a specific key.
+
+```bash
+sudo sysctl -a | less
+sudo sysctl net.core.somaxconn
+cat /proc/sys/net/ipv4/tcp_fin_timeout
+```
+
+For quick auditing, combine `sysctl --values` with command substitution so you
+only print the numeric results.
+
+```bash
+sudo sysctl --values net.ipv4.ip_local_port_range
+```
+
+You can always fall back to the `/proc/sys` hierarchy. Each dot in the parameter
+name maps to a directory level, which helps when you need to discover related
+options.
+
+```bash
+find /proc/sys/net/ipv4 -maxdepth 1 -type f
+```
+
+### Apply runtime changes
+
+`sysctl -w` (or the longer `sysctl key=value` form) updates a kernel value
+immediately until the next reboot.
+
+```bash
+sudo sysctl -w net.core.somaxconn=32768
+sudo sysctl vm.dirty_ratio=10
+```
+
+Alternatively, write straight to the `/proc/sys` interface when you are working
+inside automation that already has root privileges.
+
+```bash
+echo 1 | sudo tee /proc/sys/net/ipv4/tcp_timestamps
+```
+
+### Make changes persistent
+
+Drop-in configuration files keep your tuning reproducible. Create a descriptive
+file under `/etc/sysctl.d/` and reapply all settings with `sysctl --system`.
+
+```bash
+sudo tee /etc/sysctl.d/99-performance.conf <<'CONF'
+net.core.somaxconn = 32768
+vm.swappiness = 10
+CONF
+
+sudo sysctl --system
+```
+
+Most distributions also read `/etc/sysctl.conf`. Use whichever location best
+fits your configuration management story, but keep related options grouped to
+simplify reviews. Document the rationale and validation commands in version
+control so future reviewers understand why a value was chosen.
+
+## User, process, and kernel limits
+
+Per-process limits (for example, the number of open files) and global kernel
+limits complement each other:
+
+* **Soft limit** – The active ceiling enforced for a shell or process. Users
+ can raise it up to the matching hard limit.
+* **Hard limit** – The maximum a non-root user can request. Only root or
+ privileged services can expand this boundary.
+* **Kernel-wide limit** – A system ceiling that applies regardless of process
+ ownership. For file descriptors the important tunables are `fs.file-max` for
+ the global pool and `fs.nr_open` for the maximum any single process can reach.
+
+Inspect the current state with the shell built-ins or by reading the process
+metadata directly.
+
+```bash
+ulimit -a
+ulimit -Sn
+ulimit -Hn
+cat /proc/"$PID"/limits
+```
+
+Use `prlimit` when you need to review or adjust limits for a running service
+without restarting it. It exposes the same data the kernel enforces via the
+`RLIMIT_*` interface.
+
+```bash
+sudo prlimit --pid "$PID"
+sudo prlimit --pid "$PID" --nofile=65535:65535
+```
+
+Persist per-user limits in `/etc/security/limits.d/*.conf` (or the legacy
+`/etc/security/limits.conf`). Make sure PAM sessions load `pam_limits.so`—it is
+enabled by default on modern distributions and through SSH when `UsePAM yes` is
+set in `sshd_config`.
+
+```conf
+@nginx soft nofile 65535
+@nginx hard nofile 65535
+```
+
+Match those values with kernel ceilings via `sysctl` and confirm the headroom
+that `fs.nr_open` provides for each process.
+
+```bash
+sudo sysctl -w fs.file-max=200000
+sudo sysctl -w fs.nr_open=1048576
+```
+
+More detail on per-process configuration—including fine-grained diagnostics—is
+provided in [`process_and_file_limits.md`](./process_and_file_limits.md).
+
+## I/O scheduler selection
+
+Rotational disks and solid-state media benefit from different schedulers. Query
+all block devices and their current policy with `lsblk`.
+
+```bash
+lsblk -d -o NAME,ROTA,SCHED
+```
+
+* **SSD/NVMe** – Choose `none` (previously called `noop`) or `mq-deadline` to
+ minimize latency.
+* **SATA SSDs on legacy kernels** – `deadline` strikes a balance between
+ throughput and fairness.
+* **Spinning disks** – `bfq` focuses on fairness and consistent latency on
+  rotational media (note: `kyber` targets fast multi-queue SSD/NVMe devices).
+
+Switch schedulers at runtime by writing to the queue attribute.
+
+```bash
+echo mq-deadline | sudo tee /sys/block/sda/queue/scheduler
+echo none | sudo tee /sys/block/nvme0n1/queue/scheduler
+```
+
+Persist the choice with a udev rule so it reapplies when the device comes back
+online.
+
+```bash
+sudo tee /etc/udev/rules.d/60-io-scheduler.rules <<'RULE'
+ACTION=="add|change", KERNEL=="nvme[0-9]n[0-9]", ATTR{queue/scheduler}="none"
+RULE
+
+sudo udevadm control --reload
+```
+
+On systems that boot via GRUB, you can also append `scsi_mod.use_blk_mq=1` or a
+specific `elevator=` option to the kernel command line for legacy drivers.
+
+## Swap and virtual memory
+
+Start by reviewing the active swap devices and virtual memory policy.
+
+```bash
+swapon --show
+free -h
+sysctl vm.swappiness vm.vfs_cache_pressure
+```
+
+Tune swap behavior with the `vm.swappiness`, `vm.min_free_kbytes`, and dirty
+page ratios. Lower swappiness (for example 10) keeps the working set in RAM for
+latency-sensitive applications; higher values (60–100) favor offloading idle
+pages on memory-constrained hosts.
+
+```bash
+sudo sysctl -w vm.swappiness=10
+sudo sysctl -w vm.dirty_background_ratio=5
+sudo sysctl -w vm.dirty_ratio=15
+```
+
+Store long-term settings in the same `/etc/sysctl.d/` file you use for other
+kernel tunables so they survive reboots.
+
+```bash
+sudo tee /etc/sysctl.d/99-memory.conf <<'CONF'
+vm.swappiness = 10
+vm.vfs_cache_pressure = 75
+CONF
+```
+
+### When to disable swap
+
+Disabling swap altogether can help real-time trading systems, performance test
+beds, or high-throughput databases that suffer when the kernel reclaims pages.
+You still need enough physical RAM to absorb spikes. Turn swap off temporarily
+and comment the entry in `/etc/fstab` to make the change permanent.
+
+```bash
+sudo swapoff -a
+sudo sed -i 's/^\(\S\+\s\+\S\+\s\+swap\s\)/#\1/' /etc/fstab
+```
+
+Consider using a small zram device instead of traditional swap on laptops and
+micro instances. It gives you headroom while keeping I/O on fast compressed
+memory.
+
+## Further reading
+
+* Arch Wiki – [Improving performance](https://wiki.archlinux.org/title/Improving_performance)
+* kernel.org – [Documentation/admin-guide/sysctl](https://www.kernel.org/doc/html/latest/admin-guide/sysctl/index.html)
diff --git a/Utils/X11 on CentOS b/Utils/X11 on CentOS
deleted file mode 100644
index 508f73a..0000000
--- a/Utils/X11 on CentOS
+++ /dev/null
@@ -1,8 +0,0 @@
-##########################################
-###### Install X11 on CentOS ######
-###### Author: nduytg ######
-##########################################
-
-###Install the following packages to enable X11 on CentOS:
-yum install xorg-x11-xauth
-yum install dejavu-lgc-sans-fonts
diff --git a/Utils/X11 on CentOS.md b/Utils/X11 on CentOS.md
new file mode 100644
index 0000000..ef1c13e
--- /dev/null
+++ b/Utils/X11 on CentOS.md
@@ -0,0 +1,9 @@
+# Install X11 Support on CentOS
+
+- **Author:** nduytg
+
+Enable X11 forwarding by installing the following packages:
+
+```bash
+sudo yum install -y xorg-x11-xauth dejavu-lgc-sans-fonts
+```
diff --git a/Web Services/LEMP-apt.md b/Web Services/LEMP-apt.md
new file mode 100644
index 0000000..f0e8e36
--- /dev/null
+++ b/Web Services/LEMP-apt.md
@@ -0,0 +1,72 @@
+# Install LEMP Stack on Ubuntu with apt
+
+- **Author:** nduytg
+- **Version:** 1.0
+- **Date:** 2017-11-28
+- **Tested on:** Ubuntu 16.04
+
+## Install NGINX
+
+```bash
+wget http://nginx.org/keys/nginx_signing.key
+sudo apt-key add nginx_signing.key
+sudo tee /etc/apt/sources.list.d/nginx.list <<'LIST'
+deb http://nginx.org/packages/mainline/ubuntu/ xenial nginx
+deb-src http://nginx.org/packages/mainline/ubuntu/ xenial nginx
+LIST
+sudo apt-get update
+sudo apt-get install -y nginx
+```
+
+(Optional) Build from source:
+
+```bash
+sudo apt-get install -y devscripts
+sudo apt-get build-dep -y nginx
+mkdir -p ~/nginxbuild
+cd ~/nginxbuild
+apt-get source nginx
+```
+
+## Install MySQL
+
+```bash
+sudo apt-get install -y mysql-server
+sudo mysql_secure_installation
+```
+
+## Install PHP-FPM
+
+```bash
+sudo apt-get install -y php-fpm php-mysql
+sudo sed -i 's/^;cgi.fix_pathinfo=.*/cgi.fix_pathinfo=0/' /etc/php/7.0/fpm/php.ini
+sudo systemctl restart php7.0-fpm
+```
+
+## Configure NGINX for PHP
+
+```nginx
+server {
+ listen 80 default_server;
+ server_name 192.168.171.131;
+
+ root /var/www/html;
+ index index.php index.html index.htm index.nginx-debian.html;
+
+ location / {
+ try_files $uri $uri/ =404;
+ }
+
+ location ~ \.php$ {
+ root /var/www/blog2.ducduy.vn/html;
+ fastcgi_pass unix:/run/php/php7.0-fpm.sock;
+ fastcgi_index index.php;
+ fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
+ include fastcgi_params;
+ }
+
+ location ~ /\.ht {
+ deny all;
+ }
+}
+```
diff --git a/Web Services/LEMP-apt.txt b/Web Services/LEMP-apt.txt
deleted file mode 100644
index 20a1c25..0000000
--- a/Web Services/LEMP-apt.txt
+++ /dev/null
@@ -1,81 +0,0 @@
-##########################################
-## Install LEMP Stack by Apt on Ubuntu ##
-## Author: nduytg ##
-## Version 1.0 - Date: 28/11/17 ##
-##########################################
-
-# Tested on Ubuntu 16 #
-
-# Reference: Nginx Cookbook
-
-### Install Nginx ###
-# Download Nginx signing key
-wget http://nginx.org/keys/nginx_signing.key
-apt-key add nginx_signing.key
-
-# Edit /etc/apt/sources.list.d/nginx.list
-# Xenial for Ubuntu 16, Trusty for Ubuntu 14
-vi /etc/apt/sources.list.d/nginx.list
-deb http://nginx.org/packages/mainline/ubuntu/ xenial nginx
-deb-src http://nginx.org/packages/mainline/ubuntu/ xenial nginx
-
-# Update and install nginx with Apt-get
-apt-get update
-apt-get install nginx
-
-### Custom build (based on mainline installation above) ###
-#apt-get install devscripts
-#apt-get build-dep nginx
-
-#mkdir ~/nginxbuild
-#cd ~/nginxbuild
-#apt-get source nginx
-
-### Install MySQL ###
-apt-get install mysql-server
-mysql_secure_installation
-
-### Install PHP for processing ###
-apt-get install php-fpm php-mysql
-
-## Configure PHP Processor ##
-vim /etc/php/7.0/fpm/php.ini
-[...]
-cgi.fix_pathinfo=0
-
-systemctl restart php7.0-fpm
-
-## Configure NGINX to use PHP Processor ##
-vim /etc/nginx/conf.d/default.conf
------------------------------
-server {
- listen 80 default_server;
-
- root /var/www/html;
- index index.php index.html index.htm index.nginx-debian.html;
-
- server_name 192.168.171.131;
-
- location / {
- try_files $uri $uri/ =404;
- }
-
- location ~ \.php$ {
- root /var/www/blog2.ducduy.vn/html;
- fastcgi_pass unix:/run/php/php7.0-fpm.sock;
- fastcgi_index index.php;
- fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
- include fastcgi_params;
- }
-
-
- location ~ /\.ht {
- deny all;
- }
-}
------------------------------
-
-
-
-
-
diff --git a/Web Services/LEMP-source-CentOS.md b/Web Services/LEMP-source-CentOS.md
new file mode 100644
index 0000000..2255980
--- /dev/null
+++ b/Web Services/LEMP-source-CentOS.md
@@ -0,0 +1,248 @@
+# Build LEMP Stack from Source on CentOS 7
+
+- **Author:** nduytg
+- **Version:** 1.1
+- **Date:** 2017-11-28
+- **Tested on:** CentOS 7 with NGINX 1.13.6
+
+## Preparation
+
+```bash
+sudo yum remove -y nginx* mysql* mariadb* php*
+sudo rm -rf /etc/nginx /usr/local/php
+sudo yum update -y
+sudo yum install -y epel-release
+sudo yum groupinstall -y "Development Tools"
+sudo useradd --system --home /var/cache/nginx --shell /sbin/nologin --comment "nginx user" --user-group nginx
+```
+
+## Build NGINX
+
+```bash
+sudo yum install -y gcc gcc-c++ make zlib-devel pcre-devel openssl-devel
+mkdir -p ~/nginx_source
+cd ~/nginx_source
+wget http://nginx.org/download/nginx-1.13.6.tar.gz
+tar -xvzf nginx-1.13.6.tar.gz
+cd nginx-1.13.6
+./configure \
+ --user=nginx \
+ --group=nginx \
+ --prefix=/etc/nginx \
+ --sbin-path=/usr/sbin/nginx \
+ --conf-path=/etc/nginx/nginx.conf \
+ --pid-path=/var/run/nginx.pid \
+ --lock-path=/var/run/nginx.lock \
+ --error-log-path=/var/log/nginx/error.log \
+ --http-log-path=/var/log/nginx/access.log \
+ --with-http_gzip_static_module \
+ --with-http_stub_status_module \
+ --with-http_ssl_module \
+ --with-pcre \
+ --with-file-aio \
+ --with-http_realip_module \
+ --without-http_scgi_module \
+ --without-http_uwsgi_module
+make -j"$(nproc)"
+sudo make install
+sudo cp man/nginx.8 /usr/share/man/man8
+```
+
+Create a systemd unit (`/usr/lib/systemd/system/nginx.service`):
+
+```ini
+[Unit]
+Description=nginx - high performance web server
+Documentation=https://nginx.org/en/docs/
+After=network-online.target remote-fs.target nss-lookup.target
+Wants=network-online.target
+
+[Service]
+Type=forking
+PIDFile=/var/run/nginx.pid
+ExecStartPre=/usr/sbin/nginx -t -c /etc/nginx/nginx.conf
+ExecStart=/usr/sbin/nginx -c /etc/nginx/nginx.conf
+ExecReload=/bin/kill -s HUP $MAINPID
+ExecStop=/bin/kill -s TERM $MAINPID
+Restart=always
+
+[Install]
+WantedBy=multi-user.target
+```
+
+Enable the service:
+
+```bash
+sudo systemctl daemon-reload
+sudo systemctl enable --now nginx
+```
+
+## Build MySQL (MariaDB compatible)
+
+```bash
+sudo yum install -y cmake ncurses-devel
+mkdir -p ~/mysql_source
+cd ~/mysql_source
+wget https://cdn.mysql.com/Downloads/MySQL-5.6/mysql-5.6.37.tar.gz
+wget https://codeload.github.com/google/googletest/tar.gz/release-1.6.0
+mkdir -p mysql-5.6.37/source_downloads
+tar -zxvf mysql-5.6.37.tar.gz
+cd mysql-5.6.37
+tar -xvzf ../release-1.6.0 -C source_downloads
+cmake .
+make -j"$(nproc)"
+sudo make install
+sudo chown -R mysql:mysql /usr/local/mysql
+cd /usr/local/mysql
+sudo scripts/mysql_install_db --user=mysql --datadir=/var/lib/mysql
+sudo cp support-files/mysql.server /etc/init.d/mysqld
+sudo chkconfig --add mysqld
+sudo chkconfig mysqld on
+sudo service mysqld start
+sudo mysql_secure_installation
+```
+
+For systemd-based autostart, create a `mysqld.service` unit under `/etc/systemd/system/` with `Restart=always` and enable it with `systemctl enable mysqld`, or rely on `mysqld_safe`.
+
+## Build PHP 7 with FPM
+
+```bash
+sudo yum install -y gcc gcc-c++ libxml2-devel pkgconfig openssl-devel \
+ bzip2-devel curl-devel libpng-devel libjpeg-devel libXpm-devel \
+ freetype-devel gmp-devel libmcrypt-devel mariadb-devel aspell-devel \
+ recode-devel autoconf bison re2c libicu-devel
+mkdir -p ~/php_source
+cd ~/php_source
+wget http://sg2.php.net/distributions/php-7.0.6.tar.gz
+tar -xvzf php-7.0.6.tar.gz
+cd php-7.0.6
+./configure \
+ --with-config-file-path=/usr/local/php/etc \
+ --with-mysqli=/usr/local/mysql/bin/mysql_config \
+ --with-pdo-mysql=/usr/local/mysql/bin/mysql \
+ --prefix=/usr/local/php \
+ --sbindir=/usr/sbin \
+ --bindir=/usr/bin \
+ --enable-mbstring \
+ --with-curl=/usr/bin/curl \
+ --with-bz2 \
+ --enable-soap \
+ --enable-zip \
+ --enable-intl \
+ --with-mcrypt=/usr/local/bin/mcrypt \
+ --with-xsl \
+ --with-openssl \
+ --with-gd \
+ --with-jpeg-dir \
+ --enable-gd-native-ttf \
+ --with-freetype-dir \
+ --disable-cgi \
+ --with-zlib \
+ --with-gettext \
+ --enable-fpm \
+ --enable-simplexml \
+ --enable-xmlreader \
+ --enable-xmlwriter \
+ --with-gdbm
+make -j"$(nproc)"
+sudo make install
+sudo cp php.ini-production /usr/local/php/lib/php.ini
+sudo cp sapi/fpm/www.conf /usr/local/php/etc/php-fpm.d/www.conf
+sudo cp sapi/fpm/php-fpm.conf /usr/local/php/etc/php-fpm.conf
+```
+
+Configure the FPM pool (`/usr/local/php/etc/php-fpm.d/www.conf`):
+
+```conf
+user = nginx
+group = nginx
+listen = /var/run/php-fpm.sock
+listen.owner = nginx
+listen.group = nginx
+```
+
+Create a systemd unit (`/usr/lib/systemd/system/php-fpm.service`):
+
+```ini
+[Unit]
+Description=The PHP FastCGI Process Manager
+After=syslog.target network.target
+
+[Service]
+Type=simple
+PIDFile=/run/php-fpm/php-fpm.pid
+ExecStart=/usr/sbin/php-fpm --nodaemonize --fpm-config /usr/local/php/etc/php-fpm.conf
+ExecReload=/bin/kill -USR2 $MAINPID
+Restart=always
+
+[Install]
+WantedBy=multi-user.target
+```
+
+Enable the service:
+
+```bash
+sudo systemctl daemon-reload
+sudo systemctl enable --now php-fpm
+```
+
+## Configure NGINX with PHP-FPM
+
+```nginx
+location ~ \.php$ {
+ root html;
+ fastcgi_pass unix:/var/run/php-fpm.sock;
+ fastcgi_index index.php;
+ fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
+ include fastcgi_params;
+}
+```
+
+## Virtual hosts
+
+```bash
+sudo mkdir -p /var/www/{blog,shop,forum}.ducduy.vn/html
+sudo chown -R nginx:nginx /var/www
+sudo mkdir -p /etc/nginx/sites-available /etc/nginx/sites-enabled
+```
+
+Include the server blocks in `nginx.conf`:
+
+```nginx
+http {
+ ...
+ include /etc/nginx/sites-enabled/*.conf;
+ server_names_hash_bucket_size 64;
+}
+```
+
+Example server block (`/etc/nginx/sites-available/blog.ducduy.vn.conf`):
+
+```nginx
+server {
+ listen 80;
+ server_name blog.ducduy.vn www.blog.ducduy.vn;
+ root /var/www/blog.ducduy.vn/html;
+ index index.php index.html index.htm info.php;
+
+ location / {
+ try_files $uri $uri/ =404;
+ }
+
+ location ~ \.php$ {
+ try_files $uri =404;
+ fastcgi_pass unix:/var/run/php-fpm.sock;
+ fastcgi_index index.php;
+ fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
+ include fastcgi_params;
+ }
+}
+```
+
+Enable the site:
+
+```bash
+sudo ln -s /etc/nginx/sites-available/blog.ducduy.vn.conf /etc/nginx/sites-enabled/blog.ducduy.vn.conf
+sudo nginx -t
+sudo systemctl restart nginx
+```
diff --git a/Web Services/LEMP-source-CentOS.txt b/Web Services/LEMP-source-CentOS.txt
deleted file mode 100644
index bfefcee..0000000
--- a/Web Services/LEMP-source-CentOS.txt
+++ /dev/null
@@ -1,363 +0,0 @@
-##################################################
-## Install LEMP Stack from source on CentOS ##
-## Author: nduytg ##
-## Version 1.1 - Date: 28/11/17 ##
-##################################################
-
-##### Tested on #####
-CentOS 7
-Nginx 1.13.6
-#####################
-
-# Reference #
-https://www.nginx.com/resources/admin-guide/installing-nginx-open-source/
-https://www.digitalocean.com/community/tutorials/how-to-compile-nginx-from-source-on-a-centos-6-4-x64-vps
-http://nginx.org/en/docs/configure.html
-https://www.vultr.com/docs/how-to-compile-nginx-from-source-on-centos-7
-https://downloads.mariadb.org/mariadb/repositories/#mirror=Beritagar&distro=CentOS&distro_release=centos7-amd64--centos7&version=10.1
-https://www.digitalocean.com/community/tutorials/how-to-configure-a-linux-service-to-start-automatically-after-a-crash-or-reboot-part-1-practical-examples
-https://shaunfreeman.name/compiling-php-7-on-centos/
-https://www.digitalocean.com/community/tutorials/how-to-set-up-nginx-server-blocks-on-centos-7
-
-##### Remove old install #####
-## Remove nginx ##
-yum list installed nginx*
-yum remove nginx*
-rm -rf /etc/nginx
-
-## Remove MySQL ##
-yum list installed mysql*
-or
-yum list installed mariadb*
-
-yum remove mysql*
-or
-yum remove mariadb*
-
-## Remove PHP ##
-yum list installed php*
-yum remove php*
-rm -rf /usr/local/php/*
-
-#### Install LEMP Stack ####
-### Preparation ###
-yum update
-yum install -y epel-release
-yum groupinstall "Development Tools"
-
-# Add Nginx user and group #
-useradd --system --home /var/cache/nginx --shell /sbin/nologin --comment "nginx user" --user-group nginx
-
-### Nginx ###
-# Install prerequisite packages #
-
-yum -y install gcc gcc-c++ make zlib-devel pcre-devel openssl-devel
-
-cd ~
-mkdir nginx_source
-cd nginx_source
-wget http://nginx.org/download/nginx-1.13.6.tar.gz
-tar -xvzf nginx-1.13.6.tar.gz
-
-# Configure Nginx #
-./configure \
- --user=nginx \
- --group=nginx \
- --prefix=/etc/nginx \
- --sbin-path=/usr/sbin/nginx \
- --conf-path=/etc/nginx/nginx.conf \
- --pid-path=/var/run/nginx.pid \
- --lock-path=/var/run/nginx.lock \
- --error-log-path=/var/log/nginx/error.log \
- --http-log-path=/var/log/nginx/access.log \
- --with-http_gzip_static_module \
- --with-http_stub_status_module \
- --with-http_ssl_module \
- --with-pcre \
- --with-file-aio \
- --with-http_realip_module \
- --without-http_scgi_module \
- --without-http_uwsgi_module
-
-# Compile and Install #
-make && make install
-
-# Check if nginx is good #
-nginx -V
-nginx -t
-
-# Copy Nginx Man page to /usr/share/man/man8
-cp ./man/nginx.8 /usr/share/man/man8
-man nginx
-
-## Create file systemd unit file for nginx ##
-vi /usr/lib/systemd/system/nginx.service
-# Copy and paste the following content #
-----------------------------------------
-[Unit]
-Description=nginx - high performance web server
-Documentation=https://nginx.org/en/docs/
-After=network-online.target remote-fs.target nss-lookup.target
-Wants=network-online.target
-
-[Service]
-Type=forking
-PIDFile=/var/run/nginx.pid
-ExecStartPre=/usr/sbin/nginx -t -c /etc/nginx/nginx.conf
-ExecStart=/usr/sbin/nginx -c /etc/nginx/nginx.conf
-ExecReload=/bin/kill -s HUP $MAINPID
-ExecStop=/bin/kill -s TERM $MAINPID
-Restart=always
-
-[Install]
-WantedBy=multi-user.target
-----------------------------------------
-
-# Start and enable the Nginx #
-systemctl start nginx.service && sudo systemctl enable nginx.service
-
-# Check if Nginx will start up after a reboot #
-systemctl is-enabled nginx.service
-
-## Configure Auto-start service after rebooting or crashing ##
-vi /etc/systemd/system/multi-user.target.wants/nginx.service
----------------------------------------
-[Service]
-...
-Restart=always
-...
----------------------------------------
-
-systemctl status nginx
-systemctl daemon-reload
-systemctl status nginx
-
-# Optional #
-# Plage syntax highlighting of NGINX for vim #
-mkdir ~/.vim/
-cp -r ~/nginx-1.13.6/contrib/vim/* ~/.vim/
-
-### MySQL (MariaDB) ###
-# Instal prerequisite packages #
-yum install cmake ncurses-devel
-
-# Download source code #
-cd ~
-mkdir mysql_source
-cd mysql_source
-wget https://cdn.mysql.com/Downloads/MySQL-5.6/mysql-5.6.37.tar.gz
-wget https://codeload.github.com/google/googletest/tar.gz/release-1.6.0
-
-tar -zxvf mysql-5.6.37.tar.gz
-tar -xvzf release-1.6.0 -d mysql-5.6.37/source_downloads
-
-# Compile and Install #
-cmake .
-# Compile with 2 cpus
-make -j2
-make install
-
-# Change owner and group of mysql folder #
-chown -R mysql:mysql /usr/local/mysql
-
-# Install new database #
-cd /usr/local/mysql
-scripts/mysql_install_db --user=mysql --datadir=/var/lib/mysql
-
-# Auto startup on boot #
-cp support-files/mysql.server /etc/init.d/mysqld
-chkconfig --add mysqld
-chkconfig mysqld on
-service mysqld start
-
-## Auto start when crashing ##
-# For CentOS 6 (Upstart/init.d) #
-cp /usr/local/mysql/support-files/mysql.server /etc/init.d/mysql
-chkconfig --add mysql
-service mysql restart
-# Service will be controlled and restart by mysqld_safe
-
-# For CentOS 7 (Systemd/Systemctl) #
-# Use systemctl to start, enable service
-systemctl start mysqld && systemctl enable mysqld
-nano /etc/systemd/system/multi-user.target.wants/mysqld.service
-
--------------------------------
-[Service]
-...
-...
-Restart=always
-...
--------------------------------
-
-/usr/local/mysql/bin/mysql_secure_installation
-
-### PHP ###
-# Install prerequisite packages #
-yum install gcc gcc-c++ \
- libxml2-devel pkgconfig openssl-devel bzip2-devel \
- curl-devel libpng-devel libjpeg-devel libXpm-devel \
- freetype-devel gmp-devel libmcrypt-devel mariadb-devel \
- aspell-devel recode-devel autoconf bison re2c libicu-devel
-
-# Download PHP source
-cd ~
-mkdir php_source
-cd php_source
-wget http://sg2.php.net/distributions/php-7.0.6.tar.gz
-tar -xvzf php-7.0.6.tar.gz
-cd php-7.0.6
-
-# Configure PHP 7
-./configure \
- --with-config-file-path=/usr/local/php/etc \
- --with-mysqli=/usr/local/mysql/bin/mysql_config \
- --with-pdo-mysql=/usr/local/mysql/bin/mysql \
- --prefix=/usr/local/php \
- --sbindir=/usr/sbin \
- --bindir=/usr/bin \
- --enable-mbstring \
- --with-curl=/usr/bin/curl \
- --with-bz2 \
- --enable-soap \
- --enable-zip \
- --enable-intl \
- --with-mcrypt=/usr/local/bin/mcrypt \
- --with-xsl \
- --with-openssl \
- --with-gd \
- --with-jpeg-dir \
- --enable-gd-native-ttf \
- --with-freetype-dir \
- --disable-cgi \
- --enable-zip \
- --with-zlib \
- --with-gettext \
- --enable-fpm \
- --enable-simplexml \
- --enable-xmlreader \
- --enable-xmlwriter \
- --with-gdbm
-
-# Compile and Install
-make -j2 && make install
-
-# Configure PHP 7, FPM #
-sudo cp -v ./php.ini-production /usr/local/php/lib/php.ini
-sudo cp -v ./sapi/fpm/www.conf /usr/local/php/etc/php-fpm.d/www.conf
-sudo cp -v ./sapi/fpm/php-fpm.conf /usr/local/php/etc/php-fpm.conf
-
-
-# Edit FPM pool to run php-fpm
-vi /usr/local/php/etc/php-fpm.d/www.conf
-[...]
-user = nginx
-group = nginx
-listen = /var/run/php-fpm.sock
-listen.owner = nginx
-listen.group = nginx
-
-# Auto start PHP-FPM when booting/crashing (CentOS 7) #
-vi /usr/lib/systemd/system/php-fpm.service
--------------------------------------------
-[Unit]
-Description=The PHP FastCGI Process Manager
-After=syslog.target network.target
-
-[Service]
-Type=simple
-PIDFile=/run/php-fpm/php-fpm.pid
-ExecStart=/usr/sbin/php-fpm --nodaemonize --fpm-config /usr/local/php/etc/php-fpm.conf
-ExecReload=/bin/kill -USR2 $MAINPID
-Restart=always
-
-[Install]
-WantedBy=multi-user.target
--------------------------------------------
-
-# Enable and start PHP-FPM (CentOS 7) #
-systemctl status php-fpm.service
-systemctl start php-fpm.service
-systemctl is-enabled php-fpm.service
-systemctl enable php-fpm.service
-
-### Integrate PHP-FPM into NGINX ###
-vim /etc/nginx/nginx.conf
------------------------------
-[...]
-location ~ \.php$ {
- root html;
- fastcgi_pass unix:/var/run/php-fpm.sock;
- fastcgi_index index.php;
- fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
- include fastcgi_params;
-}
------------------------------
-
-### Create Virtual Hosts ###
-# Create the directory structure #
-mkdir -p /var/www/blog.ducduy.vn/html
-mkdir -p /var/www/shop.ducduy.vn/html
-mkdir -p /var/www/forum.ducduy.vn/html
-
-chown -R nginx:nginx /var/www/blog.ducduy.vn/html
-chown -R nginx:nginx /var/www/shop.ducduy.vn/html
-chown -R nginx:nginx /var/www/forum.ducduy.vn/html
-
-# Create new server block directories #
-mkdir /etc/nginx/sites-available
-mkdir /etc/nginx/sites-enabled
-
-# Edit nginx.conf #
-# Add these lines to the end of the http{} block #
------------------------------
-[...]
-include /etc/nginx/sites-enabled/*.conf;
-server_names_hash_bucket_size 64;
------------------------------
-
-# Create server block file #
-cp /etc/nginx/conf.d/default.conf /etc/nginx/sites-available/blog.ducduy.vn.conf
-
-# Edit the new file #
-vim /etc/nginx/sites-available/blog.ducduy.vn.conf
------------------------------
-server {
- listen 80;
- server_name blog.ducduy.vn www.blog.ducduy.vn;
-
- # note that these lines are originally from the "location /" block
- root /var/www/blog.ducduy.vn/html;
- index index.php index.html index.htm info.php;
-
- location / {
- try_files $uri $uri/ =404;
- }
- error_page 404 /404.html;
- error_page 500 502 503 504 /50x.html;
- location = /50x.html {
- root /usr/share/nginx/html;
- }
-
- location ~ \.php$ {
- try_files $uri =404;
- fastcgi_pass unix:/var/run/php-fpm.sock;
- fastcgi_index index.php;
- fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
- include fastcgi_params;
- }
-}
------------------------------
-
-# Enable the new block files #
-ln -s /etc/nginx/sites-available/blog.ducduy.vn.conf /etc/nginx/sites-enabled/blog.ducduy.vn.conf
-ln -s /etc/nginx/sites-available/blog.ducduy.vn.conf /etc/nginx/sites-enabled/forum.ducduy.vn.conf
-ln -s /etc/nginx/sites-available/blog.ducduy.vn.conf /etc/nginx/sites-enabled/shop.ducduy.vn.conf
-
-nginx -t
-systemctl restart nginx
-
-
-
-
-
diff --git a/Web Services/LEMP-source-Ubuntu.md b/Web Services/LEMP-source-Ubuntu.md
new file mode 100644
index 0000000..459116e
--- /dev/null
+++ b/Web Services/LEMP-source-Ubuntu.md
@@ -0,0 +1,118 @@
+# Build LEMP Stack from Source on Ubuntu 16.04
+
+- **Author:** nduytg
+- **Version:** 0.0
+- **Date:** 2017-11-24
+- **Tested on:** Ubuntu 16.04
+
+## Preparation
+
+```bash
+sudo apt remove -y nginx mysql-server php*
+sudo rm -rf /etc/nginx /var/www/*
+sudo apt autoremove -y
+sudo apt update && sudo apt upgrade -y
+sudo apt install -y build-essential
+```
+
+## Build NGINX
+
+```bash
+cd ~
+wget https://nginx.org/download/nginx-1.13.1.tar.gz && tar zxvf nginx-1.13.1.tar.gz
+wget https://ftp.pcre.org/pub/pcre/pcre-8.40.tar.gz && tar xzvf pcre-8.40.tar.gz
+wget http://www.zlib.net/zlib-1.2.11.tar.gz && tar xzvf zlib-1.2.11.tar.gz
+wget https://www.openssl.org/source/openssl-1.1.0f.tar.gz && tar xzvf openssl-1.1.0f.tar.gz
+rm -f *.tar.gz
+cd ~/nginx-1.13.1
+./configure --with-pcre=../pcre-8.40 --with-zlib=../zlib-1.2.11 --with-openssl=../openssl-1.1.0f
+make -j"$(nproc)"
+sudo make install
+```
+
+Create `/lib/systemd/system/nginx.service` (same content as the CentOS example)
+and enable it:
+
+```bash
+sudo systemctl daemon-reload
+sudo systemctl enable --now nginx
+```
+
+## Install MySQL
+
+```bash
+sudo apt install -y mysql-server
+sudo mysql_secure_installation
+```
+
+## Build PHP 7 with FPM
+
+```bash
+sudo apt install -y build-essential autoconf bison libxml2-dev libbz2-dev \
+ libmcrypt-dev libcurl4-openssl-dev libltdl-dev libpng-dev libpspell-dev \
+ libreadline-dev libicu-dev libfreetype6-dev libxslt1-dev imagemagick \
+ libmagickwand-dev zlib1g-dev libssl-dev libmysqlclient-dev libgdbm-dev \
+ libdb-dev cmake
+mkdir -p ~/php_source
+cd ~/php_source
+wget http://sg2.php.net/distributions/php-7.0.6.tar.gz
+tar -xvzf php-7.0.6.tar.gz
+cd php-7.0.6
+./configure \
+ --with-config-file-path=/usr/local/php/etc \
+ --with-mysqli=/usr/bin/mysql_config \
+ --with-pdo-mysql=/usr/bin/mysql \
+ --prefix=/usr/local/php \
+ --sbindir=/usr/sbin \
+ --bindir=/usr/bin \
+ --enable-mbstring \
+ --with-curl=/usr/bin/curl \
+ --with-bz2 \
+ --enable-soap \
+ --enable-zip \
+ --enable-intl \
+ --with-mcrypt=/usr/local/bin/mcrypt \
+ --with-xsl \
+ --with-openssl \
+ --with-gd \
+ --with-jpeg-dir \
+ --enable-gd-native-ttf \
+ --with-freetype-dir \
+ --disable-cgi \
+ --with-zlib \
+ --with-gettext \
+ --enable-fpm \
+ --enable-simplexml \
+ --enable-xmlreader \
+ --enable-xmlwriter \
+ --with-gdbm
+make -j"$(nproc)"
+sudo make install
+sudo mv /usr/local/php/etc/php-fpm.d/www.conf.default /usr/local/php/etc/php-fpm.d/www.conf
+sudo mv /usr/local/php/etc/php-fpm.conf.default /usr/local/php/etc/php-fpm.conf
+```
+
+Update the FPM pool to run as the `www-data` or `nginx` user and create a
+systemd service identical to the CentOS example. Enable it with
+`sudo systemctl enable --now php-fpm`.
+
+## Integrate PHP-FPM with NGINX
+
+```nginx
+location ~ \.php$ {
+ root html;
+ fastcgi_pass unix:/var/run/php-fpm.sock;
+ fastcgi_index index.php;
+ fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
+ include fastcgi_params;
+}
+```
+
+## Virtual hosts
+
+```bash
+sudo mkdir -p /var/www/{blog3,shop3,forum3}.ducduy.vn/html
+```
+
+Create additional site blocks following the CentOS example, link them from
+`sites-enabled`, reload NGINX, and verify the deployment.
diff --git a/Web Services/LEMP-source-Ubuntu.txt b/Web Services/LEMP-source-Ubuntu.txt
deleted file mode 100644
index e14188e..0000000
--- a/Web Services/LEMP-source-Ubuntu.txt
+++ /dev/null
@@ -1,260 +0,0 @@
-##################################################
-## Install LAMP Stack from source on Ubuntu ##
-## Author: nduytg ##
-## Version 0.0 - Date: 24/11/17 ##
-##################################################
-
-# Tested on Ubuntu 16 #
-
-# Reference: Nginx Cookbook
-# https://www.vultr.com/docs/how-to-compile-nginx-from-source-on-ubuntu-16-04
-# https://shaunfreeman.name/installing-php-7-on-ubuntu-16-04/
-#
-
-#### Remove old install #####
-## Remove nginx ##
-apt list installed nginx
-apt remove nginx
-rm -rf /etc/nginx
-rm -rf /var/www/*
-
-## Remove MySQL ##
-apt list installed mysql-server
-or
-yum list installed mariadb*
-
-yum remove mysql-server
-or
-yum remove mariadb*
-
-## Remove PHP ##
-apt list installed php*
-apt remove php*clkea
-
-## Remove remaining packages ##
-apt autoremove
-
-#### Install LEMP Stack ####
-### Preparation ###
-apt update
-apt upgrade
-apt install build-essential -y
-
-## Download Install Packages ##
-# Nginx 1.13.1 - Mainline version
-cd ~
-wget https://nginx.org/download/nginx-1.13.1.tar.gz && tar zxvf nginx-1.13.1.tar.gz
-
-# PCRE version 4.4 - 8.40
-wget https://ftp.pcre.org/pub/pcre/pcre-8.40.tar.gz && tar xzvf pcre-8.40.tar.gz
-
-# zlib version 1.1.3 - 1.2.11
-wget http://www.zlib.net/zlib-1.2.11.tar.gz && tar xzvf zlib-1.2.11.tar.gz
-
-# OpenSSL version 1.0.2 - 1.1.0
-wget https://www.openssl.org/source/openssl-1.1.0f.tar.gz && tar xzvf openssl-1.1.0f.tar.gz
-
-rm -f *.tar.gz
-cd ~/nginx-1.13.1
-./configure --help
-
-# Compile and Install #
-
-
-make -j2 && make install
-
-## Create file systemd unit file for nginx ##
-vi /usr/lib/systemd/system/nginx.service
-# Copy and paste the following content #
-----------------------------------------
-[Unit]
-Description=nginx - high performance web server
-Documentation=https://nginx.org/en/docs/
-After=network-online.target remote-fs.target nss-lookup.target
-Wants=network-online.target
-
-[Service]
-Type=forking
-PIDFile=/var/run/nginx.pid
-ExecStartPre=/usr/sbin/nginx -t -c /etc/nginx/nginx.conf
-ExecStart=/usr/sbin/nginx -c /etc/nginx/nginx.conf
-ExecReload=/bin/kill -s HUP $MAINPID
-ExecStop=/bin/kill -s TERM $MAINPID
-Restart=always
-
-[Install]
-WantedBy=multi-user.target
-----------------------------------------
-
-# Start and enable the Nginx #
-systemctl start nginx.service && sudo systemctl enable nginx.service
-
-# Check if Nginx will start up after a reboot #
-systemctl is-enabled nginx.service
-
-
-### MySQL (MariaDB) ###
-apt install mysql-server
-
-### PHP 7 ###
-# Install prequesite packages #
-apt-get install build-essential autoconf bison libxml2-dev libbz2-dev libmcrypt-dev \
- libcurl4-openssl-dev libltdl-dev libpng12-dev libpspell-dev libreadline-dev libicu-dev \
- libxml2-dev libpng-dev libmcrypt-dev libfreetype6 libfreetype6-dev libxslt-dev imagemagick \
- libmagickwand-dev zlib1g-dev libssl-dev libmysqlclient-dev libgdbm-dev libsslcommon2-dev libdb-dev cmake
-
-# Download PHP source
-cd ~
-mkdir php_source
-cd php_source
-wget http://sg2.php.net/distributions/php-7.0.6.tar.gz && tar -xvzf php-7.0.6.tar.gz
-cd php-7.0.6
-
-# Configure PHP 7 #
-./configure \
- --with-config-file-path=/usr/local/php/etc \
- --with-mysqli=/usr/bin/mysql_config \
- --with-pdo-mysql=/usr/bin/mysql \
- --prefix=/usr/local/php \
- --sbindir=/usr/sbin \
- --bindir=/usr/bin \
- --enable-mbstring \
- --with-curl=/usr/bin/curl \
- --with-bz2 \
- --enable-soap \
- --enable-zip \
- --enable-intl \
- --with-mcrypt=/usr/local/bin/mcrypt \
- --with-xsl \
- --with-openssl \
- --with-gd \
- --with-jpeg-dir \
- --enable-gd-native-ttf \
- --with-freetype-dir \
- --disable-cgi \
- --enable-zip \
- --with-zlib \
- --with-gettext \
- --enable-fpm \
- --enable-simplexml \
- --enable-xmlreader \
- --enable-xmlwriter \
- --with-gdbm
-
-make -j2 && make install
-
-# Configure PHP 7, FPM #
-mv /usr/local/php/etc/php-fpm.d/www.conf.default /usr/local/php/etc/php-fpm.d/www.conf
-mv /usr/localphp/etc/php-fpm.conf.default /usr/local/php/etc/php-fpm.conf
-
-# Edit FPM pool to run php-fpm
-vi /usr/local/php/etc/php-fpm.d/www.conf
-[...]
-user = nginx
-group = nginx
-listen = /var/run/php-fpm.sock
-listen.owner = nginx
-listen.group = nginx
-
-
-# Auto start PHP-FPM when booting/crashing (Systemd) #
-vi /usr/lib/systemd/system/php-fpm.service
--------------------------------------------
-[Unit]
-Description=The PHP FastCGI Process Manager
-After=syslog.target network.target
-
-[Service]
-Type=simple
-PIDFile=/run/php-fpm/php-fpm.pid
-ExecStart=/usr/sbin/php-fpm --nodaemonize --fpm-config /usr/local/php/etc/php-fpm.conf
-ExecReload=/bin/kill -USR2 $MAINPID
-Restart=always
-
-[Install]
-WantedBy=multi-user.target
--------------------------------------------
-
-# Enable and start PHP-FPM (Systemd) #
-systemctl status php-fpm.service
-systemctl start php-fpm.service
-systemctl is-enabled php-fpm.service
-systemctl enable php-fpm.service
-systemctl daemon-reload
-
-### Integrate PHP-FPM into NGINX ###
-vim /etc/nginx/nginx.conf
------------------------------
-[...]
-location ~ \.php$ {
- root html;
- fastcgi_pass unix:/var/run/php-fpm.sock;
- fastcgi_index index.php;
- fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
- include fastcgi_params;
-}
------------------------------
-
-### Create Virtual Hosts ###
-# Create the directory structure #
-mkdir -p /var/www/blog3.ducduy.vn/html
-mkdir -p /var/www/shop3.ducduy.vn/html
-mkdir -p /var/www/forum3.ducduy.vn/html
-
-chown -R nginx:nginx /var/www/blog3.ducduy.vn/html
-chown -R nginx:nginx /var/www/shop3.ducduy.vn/html
-chown -R nginx:nginx /var/www/forum3.ducduy.vn/html
-
-# Create new server block directories #
-mkdir /etc/nginx/sites-available
-mkdir /etc/nginx/sites-enabled
-
-# Edit nginx.conf #
-# Add these lines to the end of the http{} block #
-vim /etc/nginx/nginx.conf
------------------------------
-[...]
-include /etc/nginx/sites-enabled/*.conf;
-server_names_hash_bucket_size 64;
------------------------------
-
-# Create server block file #
-cp /etc/nginx/conf.d/default.conf /etc/nginx/sites-available/blog3.ducduy.vn.conf
-
-# Edit the new file #
-vim /etc/nginx/sites-available/blog3.ducduy.vn.conf
------------------------------
-server {
- listen 80;
- server_name blog3.ducduy.vn www.blog3.ducduy.vn;
-
- # note that these lines are originally from the "location /" block
- root /var/www/blog3.ducduy.vn/html;
- index index.php index.html index.htm info.php;
-
- location / {
- try_files $uri $uri/ =404;
- }
- error_page 404 /404.html;
- error_page 500 502 503 504 /50x.html;
- location = /50x.html {
- root /usr/share/nginx/html;
- }
-
- location ~ \.php$ {
- try_files $uri =404;
- fastcgi_pass unix:/var/run/php-fpm.sock;
- fastcgi_index index.php;
- fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
- include fastcgi_params;
- }
-}
------------------------------
-
-# Enable the new block files #
-ln -s /etc/nginx/sites-available/blog3.ducduy.vn.conf /etc/nginx/sites-enabled/blog.ducduy.vn.conf
-ln -s /etc/nginx/sites-available/forum3.ducduy.vn.conf /etc/nginx/sites-enabled/forum3.ducduy.vn.conf
-ln -s /etc/nginx/sites-available/shop3.ducduy.vn.conf /etc/nginx/sites-enabled/shop3.ducduy.vn.conf
-
-nginx -t
-systemctl restart nginx
\ No newline at end of file
diff --git a/Web Services/LEMP-yum.md b/Web Services/LEMP-yum.md
new file mode 100644
index 0000000..1a0d5a1
--- /dev/null
+++ b/Web Services/LEMP-yum.md
@@ -0,0 +1,35 @@
+# Install LEMP Stack with yum on CentOS 7
+
+- **Author:** nduytg
+- **Version:** 1.1
+- **Date:** 2017-11-23
+- **Tested on:** CentOS 7
+
+## Install packages
+
+```bash
+sudo yum update -y
+sudo yum install -y nginx mariadb-server mariadb php php-mysql
+```
+
+## Enable services
+
+```bash
+sudo systemctl enable --now nginx
+sudo systemctl enable --now mariadb
+```
+
+Secure MariaDB:
+
+```bash
+sudo mysql_secure_installation
+```
+
+To automatically restart NGINX after crashes, add `Restart=always` to the
+`[Service]` section of `/etc/systemd/system/multi-user.target.wants/nginx.service`
+and reload systemd:
+
+```bash
+sudo systemctl daemon-reload
+sudo systemctl restart nginx
+```
diff --git a/Web Services/LEMP-yum.txt b/Web Services/LEMP-yum.txt
deleted file mode 100644
index 7622e0c..0000000
--- a/Web Services/LEMP-yum.txt
+++ /dev/null
@@ -1,47 +0,0 @@
-##########################################
-## Install LEMP Stack by Yum on CentOS ##
-## Author: nduytg ##
-## Version 1.1 - Date: 23/11/17 ##
-##########################################
-
-# Tested on CentOS 7 #
-yum update
-
-### Install LEMP Stack ###
-# Apache #
-yum install nginx
-systemctl start nginx
-systemctl enable nginx
-
-# PHP #
-yum install php php-mysql
-
-
-# MySQL (MariaDB) #
-yum install mariadb-server mariadb
-mysql_secure_installation
-systemctl start mariadb
-systemctl enable mariadb
-
-### Configure Auto-start service after rebooting or crashing ####
-systemctl is-enabled nginx
-systemctl status nginx
-systemctl enable nginx
-systemctl is-enabled nginx
-
-vi /etc/systemd/system/multi-user.target.wants/nginx.service
-[Service]
-...
-...
-Restart=always
-...
-
-systemctl status nginx
-=> Get PID
-#Reload daemon#
-systemctl daemon-reload
-kill -9 PID
-systemctl status nginx
-=> Reboot after being killed
-
-