-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathbackup.js
executable file
·149 lines (131 loc) · 3.63 KB
/
backup.js
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
#!/usr/bin/env node
import * as fs from 'fs'
import * as yaml from 'js-yaml'
import { performance } from 'perf_hooks'
import { serializeError } from 'serialize-error'
import * as diskusage from 'diskusage'
import yargs from 'yargs'
import { hideBin } from 'yargs/helpers'
import { logger } from './lib/logger.js'
import { timingsToSpans } from './lib/utils.js'
import * as sxbackup from './lib/btrfs-sxbackup.js'
import * as btrfs from './lib/btrfs.js'
const configFile = 'config.yaml'

/* Loading the config file (mandatory — abort the process if it cannot be read/parsed) */
let config
try {
  // Resolve relative to this module so the script works from any CWD.
  const fileContents = fs.readFileSync(new URL(configFile, import.meta.url), 'utf8')
  config = yaml.load(fileContents)
} catch (e) {
  // Fixed typo ("Probjems" -> "Problems") and interpolate the configFile constant
  // instead of repeating the literal; dropped the redundant console.log(e) since
  // the serialized error already goes through the structured logger.
  logger.error(serializeError(e), `Problems with loading config file '${configFile}'`)
  process.exit(1)
}
/**
 * Executes one backup job end-to-end: run, gather statistics, check destination.
 * Failures are caught and logged with the stage that was in progress.
 *
 * @param {string} jobId - job name, matching to config file key
 */
const backupDo = function (jobId) {
  const job = makeJobObject(jobId)
  const timings = { start: performance.now() }
  let stage
  try {
    stage = 'run'
    const subvolume = sxbackup.run(job.destination)
    timings[stage] = performance.now()

    stage = 'statistics'
    const destinationDu = diskusage.checkSync(job.destination)
    const subvolumesDu = btrfs.subvolumesDu(job.destination)
    // Usage of the freshly created snapshot plus overall destination capacity.
    const { total: used, exclusive } = subvolumesDu.subvolumes[subvolume]
    const subvolumeStat = {
      used,
      exclusive,
      exclusiveTotal: subvolumesDu.exclusive,
      total: destinationDu.total,
      free: destinationDu.available
    }
    timings[stage] = performance.now()

    stage = 'checkDestination'
    const checkStat = btrfs.check(job.destination)
    timings[stage] = performance.now()

    logger.info({
      function: 'backupDo',
      jobId,
      job,
      checkStat,
      subvolumeStat,
      duration: timingsToSpans(timings)
    }, `Backup job ${jobId} finished`)
  } catch (e) {
    // `stage` names the last step that started, so the log pinpoints the failure.
    logger.error({
      function: 'backupDo',
      jobId,
      job,
      stage,
      exception: serializeError(e)
    }, `Backup job ${jobId} failed on stage '${stage}'`)
  }
}
/**
 * Configures a backup job (creates new or updates config).
 *
 * @param {string} jobId - job name, matching to config file key
 */
export const backupConfigure = function (jobId) {
  const job = makeJobObject(jobId)
  // An explicit `false` from info() means the destination is not yet initialized.
  let action = 'update'
  if (sxbackup.info(job.destination) === false) {
    action = 'init'
  }
  sxbackup.configure(job, action)
  logger.info({ function: 'backupConfigure', jobId, job }, `Backup job ${jobId} configured`)
}
/**
 * Generate the full job object from config by id.
 * Job-specific settings override the shared defaults.
 *
 * @param {string} jobId - job identifier
 */
const makeJobObject = function (jobId) {
  const defaults = config.jobDefaults
  const specific = config.jobs[jobId]
  // Later sources win, so per-job keys shadow the defaults (shallow merge).
  return Object.assign({}, defaults, specific)
}
/**
 * Launches the configure job for all configured backup jobs.
 *
 * @param {string} argv - cli arguments
 */
const doConfiguration = function (argv = {}) {
  logger.info('Update backup jobs started')
  Object.keys(config.jobs).forEach((jobId) => backupConfigure(jobId))
  logger.info('Update backup jobs finished')
}
/**
 * Launches the backup job for all configured backup jobs.
 *
 * @param {string} argv - cli arguments
 */
const doBackups = function (argv = {}) {
  logger.info('Backup process started')
  Object.keys(config.jobs).forEach((jobId) => backupDo(jobId))
  logger.info('Backup process finished')
}
// CLI entry point: `configure` (re)creates job configs, `backup` runs all jobs.
// Accessing .argv triggers parsing; the expression result is intentionally unused.
// eslint-disable-next-line no-unused-expressions
yargs(hideBin(process.argv))
  .command(
    'configure',
    'Configure (create and update) backup jobs',
    () => {},
    (argv) => doConfiguration(argv)
  )
  .command(
    'backup',
    'Execute backup jobs',
    () => {},
    (argv) => doBackups(argv)
  )
  .demandCommand(1)
  .argv