This repository was archived by the owner on Jul 14, 2024. It is now read-only.
3 changes: 3 additions & 0 deletions .gitignore
@@ -67,3 +67,6 @@ node_modules/
 
 # Local docker volume mounts
 .volumes
+
+# Editor folders
+.vscode
15 changes: 0 additions & 15 deletions .qovery.yml

This file was deleted.

2 changes: 1 addition & 1 deletion Dockerfile
@@ -1,4 +1,4 @@
-FROM node:13-alpine
+FROM node:16-alpine
 
 RUN mkdir -p /usr/src/app
 
15 changes: 13 additions & 2 deletions README.md
@@ -1,2 +1,13 @@
-No readme yet - added to try to get qovery to deploy
-+1
+### CoffeeBot
+
+CoffeeBot is a Slack bot used to track the coffee consumption of users in a Slack workspace. It was
+hacked out just before International Coffee Day, 2020.
+
+It was initially set up to run on Firebase, but that didn't work well because of the startup time
+of workers. It was then set up to use Qovery, but Qovery wouldn't work - it just kept producing
+unusable environments. It was finally set up as a simple Docker container, and it now runs happily
+on a box running CapRover.
+
+It's poorly written, hacked together in a short space of time, and given virtually no attention
+thereafter, but a single instance has run happily for 1.5 years, so it seems remarkably stable,
+all things considered.
2 changes: 1 addition & 1 deletion docker-compose.yml
@@ -2,7 +2,7 @@ version: "3"
 
 services:
   db:
-    image: postgres:12-alpine
+    image: postgres:15-alpine
     volumes:
       - pgdata:/var/lib/postgresql/data
     ports:
129 changes: 129 additions & 0 deletions src/backup.js
@@ -0,0 +1,129 @@
const queries = require("./queries");
const { DateTime } = require("luxon");
const AWS = require("aws-sdk");

const BACKUP_SINCE_QUERIES = [
  { tableName: "abstract_user_v2", query: queries.BACKUP_ABSTRACT_USER_SINCE_DATE_V2_QUERY },
  { tableName: "team_v2", query: queries.BACKUP_TEAM_SINCE_DATE_V2_QUERY },
  { tableName: "user_v2", query: queries.BACKUP_USER_SINCE_DATE_V2_QUERY },
  { tableName: "drink_v2", query: queries.BACKUP_DRINK_SINCE_DATE_V2_QUERY },
];

const BACKUP_FULL_QUERIES = [
  { tableName: "abstract_user_v2", query: queries.BACKUP_ABSTRACT_USER_ALL_V2_QUERY },
  { tableName: "team_v2", query: queries.BACKUP_TEAM_ALL_V2_QUERY },
  { tableName: "user_v2", query: queries.BACKUP_USER_ALL_V2_QUERY },
  { tableName: "drink_v2", query: queries.BACKUP_DRINK_ALL_V2_QUERY },
];

async function createBackup(pool, awsDetails) {
  console.log("Commencing incremental backup");
  const client = await pool.connect();

  try {
    const dt = DateTime.local().setZone("Australia/Melbourne");

    // Find when the last successful backup ran, so this run only exports
    // rows created since then.
    const lastBackupResult = await client.query(queries.GET_LAST_SUCCESSFUL_BACKUP_DATETIME_QUERY);

    let backupFromDate = DateTime.fromSeconds(0);
    if (lastBackupResult.rows.length > 0) {
      backupFromDate = DateTime.fromJSDate(lastBackupResult.rows[0].backup_until);
    }

    const rowsToBackUp = [];

    for (const { tableName, query } of BACKUP_SINCE_QUERIES) {
      const queryResult = await client.query(query, [backupFromDate.toISO()]);

      for (const row of queryResult.rows) {
        // Tag each row with its source table so the backup can be restored.
        rowsToBackUp.push(JSON.stringify({ tableName, ...row }));
      }
    }

    const s3 = new AWS.S3({
      accessKeyId: awsDetails.AWS_ACCESS_KEY_ID,
      secretAccessKey: awsDetails.AWS_SECRET_KEY,
      region: awsDetails.AWS_REGION,
    });

    const params = {
      Bucket: awsDetails.AWS_BUCKET_NAME,
      Key: `${awsDetails.AWS_BACKUP_FOLDER}/${dt.toISO()}.v2.rows.incremental.json`,
      Body: rowsToBackUp.join("\n"),
    };

    try {
      await s3.upload(params).promise();
      await client.query(queries.CREATE_BACKUP_ROW_QUERY, [dt.toISO(), dt.toISO(), true, ""]);
      const message = `${rowsToBackUp.length} rows backed up. Filename: ${params.Key}.`;
      console.log(message);
      return {
        response_type: "ephemeral",
        text: message,
      };
    } catch (err) {
      // Record the failed attempt; String(err) keeps the error message,
      // since JSON-serialising an Error object mostly produces "{}".
      await client.query(queries.CREATE_BACKUP_ROW_QUERY, [dt.toISO(), dt.toISO(), false, String(err)]);
      const message = `Incremental backup error: ${err}`;
      console.log(message);
      return { response_type: "ephemeral", text: message };
    }
  } finally {
    client.release();
  }
}

async function createFullBackup(pool, awsDetails) {
  console.log("Commencing full backup");
  const client = await pool.connect();

  try {
    const dt = DateTime.local().setZone("Australia/Melbourne");
    const rowsToBackUp = [];

    for (const { tableName, query } of BACKUP_FULL_QUERIES) {
      const queryResult = await client.query(query);

      for (const row of queryResult.rows) {
        rowsToBackUp.push(JSON.stringify({ tableName, ...row }));
      }
    }

    const s3 = new AWS.S3({
      accessKeyId: awsDetails.AWS_ACCESS_KEY_ID,
      secretAccessKey: awsDetails.AWS_SECRET_KEY,
      region: awsDetails.AWS_REGION,
    });

    const params = {
      Bucket: awsDetails.AWS_BUCKET_NAME,
      Key: `${awsDetails.AWS_BACKUP_FOLDER}/${dt.toISO()}.v2.rows.full.json`,
      Body: rowsToBackUp.join("\n"),
    };

    try {
      await s3.upload(params).promise();
      await client.query(queries.CREATE_BACKUP_ROW_QUERY, [dt.toISO(), dt.toISO(), true, ""]);
      const message = `${rowsToBackUp.length} rows backed up. Filename: ${params.Key}.`;
      console.log(message);
      return {
        response_type: "ephemeral",
        text: message,
      };
    } catch (err) {
      await client.query(queries.CREATE_BACKUP_ROW_QUERY, [dt.toISO(), dt.toISO(), false, String(err)]);
      const message = `Full backup error: ${err}`;
      console.log(message);
      return { response_type: "ephemeral", text: message };
    }
  } finally {
    client.release();
  }
}

module.exports = {
  createBackup,
  createFullBackup,
};
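For context, a minimal sketch of how these exports might be wired up - assuming an Express server, a pg Pool built from the POSTGRES_* variables in src/example.env, and AWS details read from the environment. The route paths and server wiring here are hypothetical, not taken from this diff:

// Hypothetical wiring sketch - not part of this PR.
const express = require("express");
const { Pool } = require("pg");
const { createBackup, createFullBackup } = require("./backup");

// Pool settings mirror the POSTGRES_* variables in src/example.env.
const pool = new Pool({
  host: process.env.POSTGRES_HOST,
  user: process.env.POSTGRES_USER,
  password: process.env.POSTGRES_PASSWORD,
  database: process.env.POSTGRES_DB,
  port: Number(process.env.POSTGRES_PORT),
});

// backup.js reads exactly these keys off the object it is given.
const awsDetails = {
  AWS_ACCESS_KEY_ID: process.env.AWS_ACCESS_KEY_ID,
  AWS_SECRET_KEY: process.env.AWS_SECRET_KEY,
  AWS_REGION: process.env.AWS_REGION,
  AWS_BUCKET_NAME: process.env.AWS_BUCKET_NAME,
  AWS_BACKUP_FOLDER: process.env.AWS_BACKUP_FOLDER,
};

const app = express();

// Both functions resolve to a Slack-style { response_type, text } payload,
// so the result can be returned to a slash command unchanged.
app.post("/backup", async (req, res) => res.json(await createBackup(pool, awsDetails)));
app.post("/backup/full", async (req, res) => res.json(await createFullBackup(pool, awsDetails)));

app.listen(3000, () => console.log("CoffeeBot listening on 3000"));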
14 changes: 7 additions & 7 deletions src/example.env
@@ -1,17 +1,17 @@
AUTH_KEY="qYe4VZgQMhsAzxqJzmFCJjZi4tVsznq2ogcN3HTm"

QOVERY_DATABASE_COFFEE_DB_USERNAME="coffeebot"
QOVERY_DATABASE_COFFEE_DB_HOST="db"
QOVERY_DATABASE_COFFEE_DB_DATABASE="coffeebot"
QOVERY_DATABASE_COFFEE_DB_PASSWORD="coffeebot_password"
QOVERY_DATABASE_COFFEE_DB_PORT=5432
AUTH_KEY=
ADMIN_KEY=

POSTGRES_HOST="db"
POSTGRES_USER="coffeebot"
POSTGRES_PASSWORD="coffeebot_password"
POSTGRES_DB="coffeebot"
POSTGRES_PORT=5432

AWS_ACCESS_KEY_ID=
AWS_SECRET_KEY=
AWS_BUCKET_NAME="coffeebot-backups"
AWS_BACKUP_FOLDER="coffeebot-backups"
AWS_REGION=

REQUEST_PASSTHROUGH_HOST="127.0.0.1"
REQUEST_PASSTHROUGH_PORT="80"
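The app presumably reads these variables at startup. A minimal sketch of loading them in local development, assuming the dotenv package and a local .env copied from this example file (the repo may instead inject them via docker-compose or CapRover):

// Hypothetical: populate process.env from a local .env in development.
require("dotenv").config();

// Fail fast if the secrets the backup code depends on are missing.
for (const name of ["AUTH_KEY", "AWS_ACCESS_KEY_ID", "AWS_SECRET_KEY"]) {
  if (!process.env[name]) {
    throw new Error(`Missing required environment variable: ${name}`);
  }
}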