diff --git a/Dockerfile b/Dockerfile index 9cf220b..8b1d2c4 100644 --- a/Dockerfile +++ b/Dockerfile @@ -6,7 +6,7 @@ RUN unzip rclone-current-linux-amd64.zip && mv rclone-*-linux-amd64/rclone /bin/ FROM restic/restic:0.16.0 -RUN apk add --update --no-cache curl mailx +RUN apk add --update --no-cache curl mailx nodejs npm COPY --from=rclone /bin/rclone /bin/rclone @@ -20,6 +20,7 @@ ENV RESTIC_TAG="" ENV NFS_TARGET="" ENV BACKUP_CRON="0 */6 * * *" ENV CHECK_CRON="" +ENV PRUNE_CRON="" ENV RESTIC_INIT_ARGS="" ENV RESTIC_FORGET_ARGS="" ENV RESTIC_JOB_ARGS="" @@ -36,6 +37,16 @@ ENV OS_REGION_NAME="" ENV OS_INTERFACE="" ENV OS_IDENTITY_API_VERSION=3 +ENV DATABASE_TYPE="" +ENV DATABASE_HOST="" +ENV DATABASE_PORT="" +ENV DATABASE_USER="" +ENV DATABASE_PASSWORD="" +ENV DATABASE_NAME="" +ENV DATABASE_BACKUP_TIME="0-23" + +ENV TZ="Asia/Shanghai" + # openshift fix RUN mkdir /.cache && \ chgrp -R 0 /.cache && \ @@ -51,8 +62,21 @@ RUN mkdir /.cache && \ VOLUME /data COPY backup.sh /bin/backup +RUN chmod u+x /bin/backup COPY check.sh /bin/check +RUN chmod u+x /bin/check +COPY prune.sh /bin/prune +RUN chmod u+x /bin/prune COPY entry.sh /entry.sh +RUN mkdir /script && \ + chgrp -R 0 /script && \ + chmod -R g=u /script +COPY package.json /script/package.json +COPY dump.js /script/dump.js +RUN chmod u+x /script/* +RUN cd /script && \ + npm install + ENTRYPOINT ["/entry.sh"] CMD ["tail","-fn0","/var/log/cron.log"] diff --git a/README.md b/README.md index a75fcd6..f9425b7 100644 --- a/README.md +++ b/README.md @@ -124,6 +124,7 @@ The container is set up by setting [environment variables](https://docs.docker.c * `NFS_TARGET` - Optional. If set, the given NFS is mounted, i.e. `mount -o nolock -v ${NFS_TARGET} /mnt/restic`. `RESTIC_REPOSITORY` must remain its default value! * `BACKUP_CRON` - A cron expression to run the backup. Note: The cron daemon uses UTC time zone. Default: `0 */6 * * *` aka every 6 hours. * `CHECK_CRON` - Optional. 
A cron expression to run data integrity check (`restic check`). If left unset, data will not be checked. Note: The cron daemon uses UTC time zone. Example: `0 23 * * 3` to run 11PM every Tuesday. +* `PRUNE_CRON` - Optional. A cron expression to remove unneeded data from the repository (`restic prune`). If left unset, data will not be pruned. +* `RESTIC_FORGET_ARGS` - Optional. Only if specified, `restic forget` is run with the given arguments after each backup. Example value: `-e "RESTIC_FORGET_ARGS=--prune --keep-last 10 --keep-hourly 24 --keep-daily 7 --keep-weekly 52 --keep-monthly 120 --keep-yearly 100"` * `RESTIC_INIT_ARGS` - Optional. Allows specifying extra arguments to `restic init` such as a password file with `--password-file`. * `RESTIC_JOB_ARGS` - Optional. Allows specifying extra arguments to the backup job such as limiting bandwith with `--limit-upload` or excluding file masks with `--exclude`. @@ -142,6 +143,15 @@ The container is set up by setting [environment variables](https://docs.docker.c * `OS_REGION_NAME` - Optional. When using restic with OpenStack Swift container. * `OS_INTERFACE` - Optional. When using restic with OpenStack Swift container. * `OS_IDENTITY_API_VERSION` - Optional. When using restic with OpenStack Swift container. + +NEW Env For Database Dump +* `DATABASE_TYPE` - Optional. Specify the database type (mongo/mongodb/mysql/pg/postgres/postgresql). Specifying this option will enable the database backup function. +* `DATABASE_BACKUP_TIME` - Optional. Database backup is automatically enabled within the specified time range (default 0-23) +* `DATABASE_HOST` - Optional. Database host address +* `DATABASE_PORT` - Optional. Database host port (if not specified, it will automatically follow the default port value of the database type) +* `DATABASE_NAME` - Optional. Database name. Supports specifying multiple database names separated by ','. If not specified, all database tables (except system tables) are used by default. 
For database types **mysql** and **postgresql** this MUST be set. +* `DATABASE_USER` - Optional. Database Username +* `DATABASE_PASSWORD` - Optional. Database password ## Volumes @@ -208,6 +218,8 @@ services: volumes: - /volume1/Backup:/data/Backup:ro # Backup /volume1/Backup from host - /home/user:/data/home:ro # Backup /home/user from host + - ./post-backup.sh:/custom/post-backup.sh:ro # For k8s: files in the custom folder are auto-copied to hooks. Run script post-backup.sh after every backup + - ./post-check.sh:/custom/post-check.sh:ro # For k8s: files in the custom folder are auto-copied to hooks. Run script post-check.sh after every check - ./post-backup.sh:/hooks/post-backup.sh:ro # Run script post-backup.sh after every backup - ./post-check.sh:/hooks/post-check.sh:ro # Run script post-check.sh after every check - ./ssh:/root/.ssh # SSH keys and config so we can login to "storageserver" without password diff --git a/backup.sh b/backup.sh index 6391ebc..c19f80b 100755 --- a/backup.sh +++ b/backup.sh @@ -12,6 +12,38 @@ logLast() { echo "$1" >> ${lastLogfile} } +backupDatebase(){ + echo "" + echo "### Start ${DATABASE_TYPE} Dump ###" + echo "Backup Datebase: ${DATABASE_TYPE}" + # 检查 dump 目录是否存在,如果存在则删除 + if [ -d "/script/dump" ]; then + rm -rf /script/dump + fi + mkdir /script/dump + # 运行 dump 脚本 + cd /script + npm run dump + # 检查 /script/dump 目录下是否为空,不为空则复制 dump 数据到 + if [ "$(ls -A /script/dump)" ]; then + # 检查 /data/dump 目录存在,自动删除旧备份;如果不存在则创建 dump 目录 + if [ -d "/data/dump" ]; then + rm -rf /data/dump/* + else + mkdir /data/dump + fi + # 复制最新的备份 + cp -r /script/dump /data/ + echo "" + echo "MongoDB Dump List:" + ls -l /data/dump + else + echo "./dump Folder Empty, ${DATABASE_TYPE} Dump Fail." + fi + echo "### End ${DATABASE_TYPE} Dump ###" + echo "" +} + if [ -f "/hooks/pre-backup.sh" ]; then echo "Starting pre-backup script ..." /hooks/pre-backup.sh @@ -19,6 +51,11 @@ else echo "Pre-backup script not found ..." 
fi +# Dump Datebase +if [ -n "${DATABASE_TYPE}" ]; then + backupDatebase +fi + start=`date +%s` rm -f ${lastLogfile} ${lastMailLogfile} echo "Starting Backup at $(date +"%Y-%m-%d %H:%M:%S")" diff --git a/dump.js b/dump.js new file mode 100644 index 0000000..ac26a87 --- /dev/null +++ b/dump.js @@ -0,0 +1,589 @@ +const MongoClient = require('mongodb').MongoClient; +const mysql = require('mysql2'); +const { Pool } = require('pg'); +const { copyToStream } = require('pg-copy-streams'); + +const fs = require('fs'); +const path = require('path'); +const readline = require('readline'); +const moment = require('moment'); +const util = require('util'); + + +/** + * 导出所有MongoDB数据库的数据 + * + * 此函数会连接MongoDB服务器,获取所有非系统数据库, + * 并将每个数据库中的所有集合数据导出为JSON文件。 + * + * 环境变量要求: + * - MONGO_ROOT_USERNAME: MongoDB用户名 + * - MONGO_ROOT_PASSWORD: MongoDB密码 + * - MONGO_HOST: MongoDB主机地址 + * - MONGO_PORT: MongoDB端口号 + * - MONGO_DATABASE: MongoDB数据库名称 + * + * 导出文件将保存在 ./dump 目录下,按数据库名和集合名组织: + * ./dump/ + * ├── database1/ + * │ ├── collection1.json + * │ └── collection2.json + * └── database2/ + * └── collection1.json + * + * @async + * @throws {Error} 当数据库连接失败或导出过程出错时抛出异常 + */ +async function dumpMongoAllDatabases() { + const uri = `mongodb://${process.env.DATABASE_USER || process.env.MONGO_ROOT_USERNAME}:${process.env.DATABASE_PASSWORD || process.env.MONGO_ROOT_PASSWORD}@${process.env.DATABASE_HOST || process.env.MONGO_HOST}:${process.env.DATABASE_PORT || process.env.MONGO_PORT || '27017'}/`; + // const dbName = process.env.DATABASE_NAME || process.env.MONGO_DATEBASE; // 替换成你的数据库名称 + try { + const client = new MongoClient(uri, { useNewUrlParser: true, useUnifiedTopology: true }); + await client.connect(); + + // 获取所有数据库名称 + const databases = await client.db().admin().listDatabases(); + const databaseNames = databases.databases.map(db => db.name); + + // 创建一个文件夹来存储 dump 文件 + const dumpDir = './dump'; + if (!fs.existsSync(dumpDir)) { + fs.mkdirSync(dumpDir); + } + + // 循环遍历每个数据库并 dump + for 
(const dbName of databaseNames) { + // 判断数据库是否为系统数据库 + if (dbName === 'admin' || dbName === 'local' || dbName === 'config') { + console.log(`Skipping system database: ${dbName}`); + continue; + } + const db = client.db(dbName); + + // 获取所有集合 + const collections = await db.collections(); + + // 循环遍历每个集合并 dump + for (const collection of collections) { + const collectionName = collection.collectionName; + + // 创建一文件来存储 dump 数据 + const dumpFile = `${dumpDir}/${dbName}/${collectionName}.json`; + + // 创建文件夹 + const collectionDir = `${dumpDir}/${dbName}`; + if (!fs.existsSync(collectionDir)) { + fs.mkdirSync(collectionDir); + } + + // 读取集合数据 + const documents = await collection.find().toArray(); + + // 将数据写入文件 + fs.writeFileSync(dumpFile, JSON.stringify(documents, null, 2)); + + console.log(`Dumped ${collectionName} to ${dumpFile}`); + } + + console.log(`Database ${dbName} dump completed!`); + } + + console.log('All databases dump completed!'); + await client.close(); + } catch (err) { + console.error('Error during database dump:', err); + } +} + +/** + * MongoDB数据库备份工具函数 + * + * @description 该函数用于备份MongoDB数据库中的所有集合数据 + * 将每个集合的数据以JSON格式保存到本地文件中 + * + * @param {string} uri - MongoDB连接字符串 + * @param {string[]} databaseNames - 需要备份的数据库名称列表 + * + * @example + * const uri = 'mongodb://localhost:27017'; + * const databaseNames = ['mydb1', 'mydb2']; + * await dumpMongoDatabases(uri, databaseNames); + * + * @returns {Promise} + * @throws {Error} 当数据库连接或备份过程出错时抛出异常 + */ +async function dumpMongoDatabase(dbName) { + const uri = `mongodb://${process.env.DATABASE_USER || process.env.MONGO_ROOT_USERNAME}:${process.env.DATABASE_PASSWORD || process.env.MONGO_ROOT_PASSWORD}@${process.env.DATABASE_HOST || process.env.MONGO_HOST}:${process.env.DATABASE_PORT || process.env.MONGO_PORT || '27017'}/`; + // const dbName = process.env.DATABASE_NAME || process.env.MONGO_DATABASE; // 替换成你的数据库名称 + try { + if(!dbName){ + console.error('DATABASE_NAME or MONGO_DATEBASE is not set'); + return; + } + 
const client = new MongoClient(uri, { useNewUrlParser: true, useUnifiedTopology: true }); + await client.connect(); + const db = client.db(dbName); + + const collections = await db.collections(); + + // 创建一个文件夹来存储 dump 文件 + const dumpDir = './dump'; + if (!fs.existsSync(dumpDir)) { + fs.mkdirSync(dumpDir); + } + + // 循环遍历每个集合并 dump + for (const collection of collections) { + const collectionName = collection.collectionName; + + // 创建一个文件来存储 dump 数据 + const dumpFile = `${dumpDir}/${dbName}/${collectionName}.json`; + + // 创建文件夹 + const collectionDir = `${dumpDir}/${dbName}`; + if (!fs.existsSync(collectionDir)) { + fs.mkdirSync(collectionDir); + } + + // 读取集合数据 + const documents = await collection.find().toArray(); + + // 将数据写入文件 + fs.writeFileSync(dumpFile, JSON.stringify(documents, null, 2)); + + console.log(`Dumped ${collectionName} to ${dumpFile}`); + } + + console.log('Database dump completed!'); + await client.close(); + } catch (err) { + console.error('Error during database dump:', err); + } +} + + +/** + * 备份MySQL数据库中的所有的数据到本地文件 + * + * @param {Object} config - MySQL数据库连接配置 + * @param {string} config.host - 数据库主机地址 + * @param {string} config.user - 数据库用户名 + * @param {string} config.password - 数据库密码 + * @param {string} config.database - 数据库名称 + * @param {string} backupDir - 备份文件保存目录 + * + * @example + * const config = { + * host: 'localhost', + * user: 'root', + * password: '123456', + * database: 'mydb' + * }; + * const backupDir = './backup'; + * await backupMysqlAllDatabase(config, backupDir); + * + * @returns {Promise} + * @throws {Error} 当数据库连接或备份过程出错时抛出异常 + */ +async function backupMysqlAllDatabase() { + const config = { + host: process.env.DATABASE_HOST || process.env.MYSQL_HOST, + user: process.env.DATABASE_USER || process.env.MYSQL_USER, + password: process.env.DATABASE_PASSWORD || process.env.MYSQL_PASSWORD, + database: process.env.DATABASE_NAME || process.env.MYSQL_DATABASE, + port: parseInt(process.env.DATABASE_PORT || process.env.MYSQL_PORT || 
3306) + }; + const connection = mysql.createConnection(config).promise(); + const backupDir = './dump'; + + try { + if (!fs.existsSync(backupDir)) { + fs.mkdirSync(backupDir); + } + + await connection.connect(); + + // 查询所有表 + // const [tables] = await connection.query(` + // SELECT TABLE_NAME + // FROM INFORMATION_SCHEMA.TABLES + // WHERE TABLE_SCHEMA = ? + // AND TABLE_TYPE = 'BASE TABLE' + // `, [config.database]); + const [tables] = await connection.query(` + SHOW FULL TABLES + WHERE Table_type = 'BASE TABLE'; + `, [config.database]); + + console.log('tables:', tables); + + const timestamp = moment().format('YYYYMMDDHHmmss'); + // const backupFile = path.join(backupDir, `mysql-backup-${timestamp}.sql`); + const backupFile = path.join(backupDir, `mysql-backup.sql`); + const writeStream = fs.createWriteStream(backupFile); + + // 写入文件头部信息 + writeStream.write(`-- MySQL dump for database ${config.database}\n`); + writeStream.write(`-- Created at: ${moment().format('YYYY-MM-DD HH:mm:ss')}\n\n`); + writeStream.write(`SET NAMES utf8mb4;\n`); + writeStream.write(`SET FOREIGN_KEY_CHECKS=0;\n\n`); + + // 为每个表创建备份 + for (const table of tables) { + const tableName = table.TABLE_NAME; + + // 获取建表语句 + const [createTable] = await connection.query(`SHOW CREATE TABLE \`${tableName}\``); + const createTableSql = createTable[0]['Create Table']; + + writeStream.write(`--\n-- Table structure for \`${tableName}\`\n--\n\n`); + writeStream.write(`DROP TABLE IF EXISTS \`${tableName}\`;\n`); + writeStream.write(`${createTableSql};\n\n`); + + // 获取表数据 + const [rows] = await connection.query(`SELECT * FROM \`${tableName}\``); + + if (rows.length > 0) { + writeStream.write(`--\n-- Dumping data for table \`${tableName}\`\n--\n\n`); + + // 批量处理插入语句,提高效率 + const batchSize = 100; + for (let i = 0; i < rows.length; i += batchSize) { + const batch = rows.slice(i, i + batchSize); + const values = batch.map(row => { + const rowValues = Object.values(row).map(value => { + if (value === null) return 
'NULL'; + if (typeof value === 'boolean') return value ? 1 : 0; + if (typeof value === 'number') return value; + if (value instanceof Date) return `'${moment(value).format('YYYY-MM-DD HH:mm:ss')}'`; + if (Buffer.isBuffer(value)) return `0x${value.toString('hex')}`; + return `'${value.toString().replace(/[\0\x08\x09\x1a\n\r"'\\\%]/g, char => { + switch (char) { + case "\0": return "\\0"; + case "\x08": return "\\b"; + case "\x09": return "\\t"; + case "\x1a": return "\\z"; + case "\n": return "\\n"; + case "\r": return "\\r"; + case "\"": + case "'": + case "\\": + case "%": return "\\"+char; + default: return char; + } + })}'`; + }); + return `(${rowValues.join(', ')})`; + }); + + const columns = Object.keys(rows[0]).map(key => `\`${key}\``).join(', '); + writeStream.write(`INSERT INTO \`${tableName}\` (${columns}) VALUES\n`); + writeStream.write(`${values.join(',\n')};\n`); + } + writeStream.write('\n'); + } + } + + writeStream.write(`SET FOREIGN_KEY_CHECKS=1;\n`); + writeStream.end(); + console.log(`Database backup completed! 
Backup file: ${backupFile}`); + + } catch (err) { + console.error('Error during database backup:', err); + } finally { + if (connection) { + await connection.end(); + } + } +} + +/** + * 备份MySQL数据库中的指定表数据到本地文件 + * + * @param {Object} config - MySQL数据库连接配置 + * @param {string} config.host - 数据库主机地址 + * @param {string} config.user - 数据库用户名 + * @param {string} config.password - 数据库密码 + * @param {string} config.database - 数据库名称 + * @param {string} backupDir - 备份文件保存目录 + * + * @example + * const config = { + * host: 'localhost', + * user: 'root', + * password: '123456', + * database: 'mydb' + * }; + * const backupDir = './backup'; + * await backupMysqlDatabase(config, backupDir); + * + * @returns {Promise} + * @throws {Error} 当数据库连接或备份过程出错时抛出异常 + */ +async function backupMysqlDatabase(databaseName) { + const config = { + host: process.env.DATABASE_HOST || process.env.MYSQL_HOST, + user: process.env.DATABASE_USER || process.env.MYSQL_USER, + password: process.env.DATABASE_PASSWORD || process.env.MYSQL_PASSWORD, + database: databaseName, + port: parseInt(process.env.DATABASE_PORT || process.env.MYSQL_PORT || 3306) + }; + const connection = mysql.createConnection(config).promise(); + const backupDir = './dump'; + + try { + if (!fs.existsSync(backupDir)) { + fs.mkdirSync(backupDir); + } + + await connection.connect(); + + // 获取数据库中的所有表 + const [tables] = await connection.query(` + SELECT TABLE_NAME + FROM INFORMATION_SCHEMA.TABLES + WHERE TABLE_SCHEMA = ? 
+ `, [config.database]); + + const timestamp = moment().format('YYYYMMDDHHmmss'); + // const backupFile = path.join(backupDir, `mysql-backup-${timestamp}.sql`); + const backupFile = path.join(backupDir, `mysql-${databaseName}-backup.sql`); + const writeStream = fs.createWriteStream(backupFile); + + // 遍历每个表并备份 + for (const table of tables) { + const tableName = table.TABLE_NAME; + + // 获取表结构 + const [tableStructure] = await connection.query(`SHOW CREATE TABLE \`${tableName}\``); + + // 写入表结构 + writeStream.write(`-- Table structure for ${tableName}\n`); + writeStream.write(`DROP TABLE IF EXISTS \`${tableName}\`;\n`); + writeStream.write(`${tableStructure[0]['Create Table']};\n\n`); + + // 获取表数据 + const [rows] = await connection.query(`SELECT * FROM \`${tableName}\``); + + if (rows.length > 0) { + writeStream.write(`-- Data for table ${tableName}\n`); + const columns = Object.keys(rows[0]); + + for (const row of rows) { + const values = columns.map(column => { + const value = row[column]; + if (value === null) return 'NULL'; + if (typeof value === 'number') return value; + return `'${value.toString().replace(/'/g, "''")}'`; + }); + + writeStream.write( + `INSERT INTO \`${tableName}\` (${columns.map(c => '`'+c+'`').join(', ')}) ` + + `VALUES (${values.join(', ')});\n` + ); + } + writeStream.write('\n'); + } + } + + writeStream.end(); + console.log(`Database backup completed! 
Backup file: ${backupFile}`); + + } catch (err) { + console.error('Error during database backup:', err); + } finally { + if (connection) { + await connection.end(); + } + } +} + + +/** + * 备份PostgreSQL数据库中的所有表数据到本地文件 + * + * @param {Object} config - PostgreSQL数据库连接配置 + * @param {string} config.user - 数据库用户名 + * @param {string} config.host - 数据库主机地址 + * @param {string} config.database - 数据库名称 + * @param {string} config.password - 数据库密码 + * @param {number} config.port - 数据库端口号 + * @param {string} backupDir - 备份文件保存目录 + */ +async function backupPostgresDatabase(databaseName) { + const config = { + host: process.env.DATABASE_HOST || process.env.PG_HOST, + user: process.env.DATABASE_USER || process.env.PG_USER, + password: process.env.DATABASE_PASSWORD || process.env.PG_PASSWORD, + database: databaseName, + port: parseInt(process.env.DATABASE_PORT || process.env.PG_PORT || 5432), + }; + const pool = new Pool(config); + const backupDir = './dump'; + + try { + if (!fs.existsSync(backupDir)) { + fs.mkdirSync(backupDir); + } + + const timestamp = moment().format('YYYYMMDDHHmmss'); + // const backupFile = path.join(backupDir, `postgres-backup-${timestamp}.sql`); + const backupFile = path.join(backupDir, `postgres-${databaseName}-backup.sql`); + const writeStream = fs.createWriteStream(backupFile); + + // 写入文件头部信息 + writeStream.write(`-- PostgreSQL dump\n`); + writeStream.write(`-- Created at: ${moment().format('YYYY-MM-DD HH:mm:ss')}\n\n`); + + // 获取所有表名 + const { rows: tables } = await pool.query(` + SELECT table_name + FROM information_schema.tables + WHERE table_schema = 'public' + AND table_type = 'BASE TABLE' + AND table_name NOT LIKE 'pg_%' + AND table_name NOT LIKE 'sql_%' + `); + + for (const table of tables) { + const tableName = table.table_name; + + // 获取表结构 + const { rows: columns } = await pool.query(` + SELECT column_name, data_type, character_maximum_length, + is_nullable, column_default + FROM information_schema.columns + WHERE table_name = $1 + ORDER BY 
ordinal_position + `, [tableName]); + + // 写入表结构 + writeStream.write(`-- Table structure: ${tableName}\n`); + writeStream.write(`DROP TABLE IF EXISTS "${tableName}";\n`); + writeStream.write(`CREATE TABLE "${tableName}" (\n`); + + const columnDefs = columns.map(col => { + let def = ` "${col.column_name}" ${col.data_type}`; + if (col.character_maximum_length) { + def += `(${col.character_maximum_length})`; + } + if (col.is_nullable === 'NO') { + def += ' NOT NULL'; + } + if (col.column_default) { + def += ` DEFAULT ${col.column_default}`; + } + return def; + }); + + writeStream.write(columnDefs.join(',\n')); + writeStream.write('\n);\n\n'); + + // 获取并写入表数据 + const { rows: data } = await pool.query(`SELECT * FROM "${tableName}"`); + if (data.length > 0) { + writeStream.write(`-- Table data: ${tableName}\n`); + for (const row of data) { + const values = Object.values(row).map(val => { + if (val === null) return 'NULL'; + if (typeof val === 'number') return val; + return `'${val.toString().replace(/'/g, "''")}'`; + }); + + writeStream.write( + `INSERT INTO "${tableName}" (${Object.keys(row).map(k => `"${k}"`).join(', ')}) ` + + `VALUES (${values.join(', ')});\n` + ); + } + writeStream.write('\n'); + } + } + + writeStream.end(); + console.log(`Database backup completed! 
Backup file: ${backupFile}`); + + } catch (err) { + console.error('Error during database backup:', err); + } finally { + if (pool) { + await pool.end(); + } + } +} + + +// 创建一个主函数来处理数据库备份 +async function main() { + // 通过TIME_RANGE环境变量指定时间范围,格式为"HH-HH",如"0-23" + const timeRange = process.env.DATABASE_BACKUP_TIME || "0-23"; + const [startStr, endStr] = timeRange.split("-"); + let start = parseInt(startStr); + let end = parseInt(endStr); + const now = moment().hour(); + // 验证时间范围的有效性 + if (isNaN(start) || isNaN(end) || start < 0 || start > 23 || end < 0 || end > 23) { + console.error('Invalid time range format, using default value "0-23"'); + // return; + start = 0; + end = 23; + } + console.log(`Current time: ${now}, Specified time range: ${start}-${end}`); + if (now < start || now > end) { + console.log(`Now not within the specified time period ${process.env.DATABASE_BACKUP_TIME}, Database Dump Task has been cancelled.`); + return; + } + // ���据 DATABASE_TYPE 全局变判断要执行的备份类型 + if(process.env.DATABASE_TYPE){ + let type = process.env.DATABASE_TYPE.toLowerCase() + switch(type){ + case "mongo": + case "mongodb": + if(process.env.DATABASE_NAME){ + // 备份数据库可能为多个,使用,进行分割 + const databaseNames = process.env.DATABASE_NAME.split(','); + for(const databaseName of databaseNames){ + console.log(`Backuping database: ${databaseName}`); + await dumpMongoDatabase(databaseName) + console.log(`Backuping database: ${databaseName} completed!`); + } + }else{ + await dumpMongoAllDatabases() + } + break; + case "mysql": + if(process.env.DATABASE_NAME){ + // 备份数据库可能为多个,使用,进行分割 + const databaseNames = process.env.DATABASE_NAME.split(','); + for(const databaseName of databaseNames){ + console.log(`Backuping database: ${databaseName}`); + await backupMysqlDatabase(databaseName) + console.log(`Backuping database: ${databaseName} completed!`); + } + }else{ + console.error('DATABASE_NAME env must be specified'); + // await backupMysqlAllDatabase() + } + break; + case "pg": + case "postgres": + case 
"postgresql": + if(process.env.DATABASE_NAME){ + const databaseNames = process.env.DATABASE_NAME.split(','); + for(const databaseName of databaseNames){ + console.log(`Backuping database: ${databaseName}`); + await backupPostgresDatabase(databaseName) + console.log(`Backuping database: ${databaseName} completed!`); + } + }else{ + console.error('DATABASE_NAME env must be specified'); + } + break; + default: + console.error('Unsupported database type:', type); + break; + } + } +} + +// 执行主函数 +main().catch(console.error); diff --git a/entry.sh b/entry.sh index de51760..c8b1725 100755 --- a/entry.sh +++ b/entry.sh @@ -2,6 +2,15 @@ echo "Starting container ..." +# Copy Custom Hooks Script File +if [ -d "/custom" ]; then + mkdir /hooks + cp -L /custom/* /hooks + chmod u+x /hooks/* + # Run npm install + npm install +fi + if [ -n "${NFS_TARGET}" ]; then echo "Mounting NFS based on NFS_TARGET: ${NFS_TARGET}" mount -o nolock -v ${NFS_TARGET} /mnt/restic @@ -35,6 +44,12 @@ if [ -n "${CHECK_CRON}" ]; then echo "${CHECK_CRON} /usr/bin/flock -n /var/run/backup.lock /bin/check >> /var/log/cron.log 2>&1" >> /var/spool/cron/crontabs/root fi +# If PRUNE_CRON is set we will enable automatic backup checking +if [ -n "${PRUNE_CRON}" ]; then + echo "Setup check cron job with cron expression PRUNE_CRON: ${PRUNE_CRON}" + echo "${PRUNE_CRON} /usr/bin/flock -n /var/run/backup.lock /bin/prune >> /var/log/cron.log 2>&1" >> /var/spool/cron/crontabs/root +fi + # Make sure the file exists before we start tail touch /var/log/cron.log @@ -43,4 +58,4 @@ crond echo "Container started." -exec "$@" \ No newline at end of file +exec "$@" diff --git a/entry.sh-old b/entry.sh-old new file mode 100644 index 0000000..9af1e7e --- /dev/null +++ b/entry.sh-old @@ -0,0 +1,46 @@ +#!/bin/sh + +echo "Starting container ..." 
+ +if [ -n "${NFS_TARGET}" ]; then + echo "Mounting NFS based on NFS_TARGET: ${NFS_TARGET}" + mount -o nolock -v ${NFS_TARGET} /mnt/restic +fi + +restic snapshots ${RESTIC_INIT_ARGS} &>/dev/null +status=$? +echo "Check Repo status $status" + +if [ $status != 0 ]; then + echo "Restic repository '${RESTIC_REPOSITORY}' does not exists. Running restic init." + restic init ${RESTIC_INIT_ARGS} + + init_status=$? + echo "Repo init status $init_status" + + if [ $init_status != 0 ]; then + echo "Failed to init the repository: '${RESTIC_REPOSITORY}'" + exit 1 + fi +fi + + + +echo "Setup backup cron job with cron expression BACKUP_CRON: ${BACKUP_CRON}" +echo "${BACKUP_CRON} /usr/bin/flock -n /var/run/backup.lock /bin/backup >> /var/log/cron.log 2>&1" > /var/spool/cron/crontabs/root + +# If CHECK_CRON is set we will enable automatic backup checking +if [ -n "${CHECK_CRON}" ]; then + echo "Setup check cron job with cron expression CHECK_CRON: ${CHECK_CRON}" + echo "${CHECK_CRON} /usr/bin/flock -n /var/run/backup.lock /bin/check >> /var/log/cron.log 2>&1" >> /var/spool/cron/crontabs/root +fi + +# Make sure the file exists before we start tail +touch /var/log/cron.log + +# start the cron deamon +crond + +echo "Container started." 
+ +exec "$@" diff --git a/package.json b/package.json new file mode 100644 index 0000000..e33a621 --- /dev/null +++ b/package.json @@ -0,0 +1,21 @@ +{ + "name": "npmscript", + "version": "1.0.0", + "description": "A Node.js application that connects to Datebase", + "main": "dump.js", + "scripts": { + "start": "node dump.js", + "dump": "node dump.js" + }, + "author": "emengweb", + "license": "MIT", + "dependencies": { + "moment": "^2.29.4", + "mongodb": "^4.10.0", + "mysql": "^2.18.1", + "mysql2": "^3.11.5", + "pg": "^8.8.0", + "pg-copy-streams": "^6.0.6", + "util": "^0.12.5" + } +} diff --git a/prune.sh b/prune.sh new file mode 100644 index 0000000..4c167bc --- /dev/null +++ b/prune.sh @@ -0,0 +1,94 @@ +#!/bin/sh + +lastLogfile="/var/log/prune-last.log" +lastMailLogfile="/var/log/prune-mail-last.log" +lastMicrosoftTeamsLogfile="/var/log/prune-microsoft-teams-last.log" + +copyErrorLog() { + cp ${lastLogfile} /var/log/prune-error-last.log +} + +logLast() { + echo "$1" >> ${lastLogfile} +} + +if [ -f "/hooks/pre-prune.sh" ]; then + echo "Starting pre-prune script ..." + /hooks/pre-prune.sh +else + echo "Pre-prune script not found ..." +fi + +start=`date +%s` +rm -f ${lastLogfile} ${lastMailLogfile} +echo "Starting Prune at $(date +"%Y-%m-%d %H:%M:%S")" +echo "Starting Prune at $(date)" >> ${lastLogfile} +logLast "PRUNE_CRON: ${PRUNE_CRON}" +logLast "RESTIC_DATA_SUBSET: ${RESTIC_DATA_SUBSET}" +logLast "RESTIC_REPOSITORY: ${RESTIC_REPOSITORY}" +logLast "AWS_ACCESS_KEY_ID: ${AWS_ACCESS_KEY_ID}" + +# Do not save full prune log to logfile but to prune-last.log + +if [ -n "${RESTIC_FORGET_ARGS}" ]; then + echo "Prune about old snapshots based on RESTIC_FORGET_ARGS = ${RESTIC_FORGET_ARGS}" + restic forget --prune ${RESTIC_FORGET_ARGS} >> ${lastLogfile} 2>&1 + rc=$? 
+ logLast "Finished forget at $(date)" + if [[ $rc == 0 ]]; then + echo "Prune Successful" + else + echo "Prune Failed with Status ${rc}" + restic unlock + copyErrorLog + fi +else + restic prune >> ${lastLogfile} 2>&1 + if [[ $rc == 0 ]]; then + echo "Prune Successful" + else + echo "Prune Failed with Status ${rc}" + restic unlock + copyErrorLog + fi +fi +pruneRC=$? +logLast "Finished prune at $(date)" +if [[ $pruneRC == 0 ]]; then + echo "Prune Successful" +else + echo "Prune Failed with Status ${pruneRC}" + restic unlock + copyErrorLog +fi + +end=`date +%s` +echo "Finished Prune at $(date +"%Y-%m-%d %H:%M:%S") after $((end-start)) seconds" + +if [ -n "${TEAMS_WEBHOOK_URL}" ]; then + teamsTitle="Restic Last Prune Log" + teamsMessage=$( cat ${lastLogfile} | sed 's/"/\"/g' | sed "s/'/\'/g" | sed ':a;N;$!ba;s/\n/\n\n/g' ) + teamsReqBody="{\"title\": \"${teamsTitle}\", \"text\": \"${teamsMessage}\" }" + sh -c "curl -H 'Content-Type: application/json' -d '${teamsReqBody}' '${TEAMS_WEBHOOK_URL}' > ${lastMicrosoftTeamsLogfile} 2>&1" + if [ $? == 0 ]; then + echo "Microsoft Teams notification successfully sent." + else + echo "Sending Microsoft Teams notification FAILED. Prune ${lastMicrosoftTeamsLogfile} for further information." + fi +fi + +if [ -n "${MAILX_ARGS}" ]; then + sh -c "mail -v -S sendwait ${MAILX_ARGS} < ${lastLogfile} > ${lastMailLogfile} 2>&1" + if [ $? == 0 ]; then + echo "Mail notification successfully sent." + else + echo "Sending mail notification FAILED. Prune ${lastMailLogfile} for further information." + fi +fi + +if [ -f "/hooks/post-prune.sh" ]; then + echo "Starting post-prune script ..." + /hooks/post-prune.sh $pruneRC +else + echo "Post-prune script not found ..." +fi