1089 Commits
1aeb7bf418
...
20a564975f
14355 changed files with 2013575 additions and 1735858 deletions
Split View
Diff Options
-
45.github/ISSUE_TEMPLATE/bug_report.md
-
2.gitignore
-
5.jshintignore
-
346CHANGELOG.md
-
38Jenkinsfile
-
4bower.json
-
347config/config.example.js
-
2customize.dist/ckeditor-config.js
-
69customize.dist/loading.js
-
10customize.dist/login.js
-
3customize.dist/messages.js
-
8customize.dist/pages.js
-
53customize.dist/src/less2/include/alertify.less
-
52customize.dist/src/less2/include/buttons.less
-
4customize.dist/src/less2/include/colortheme.less
-
113customize.dist/src/less2/include/corner.less
-
3customize.dist/src/less2/include/dropdown.less
-
7customize.dist/src/less2/include/fileupload.less
-
49customize.dist/src/less2/include/markdown.less
-
17customize.dist/src/less2/include/modal.less
-
120customize.dist/src/less2/include/modals-ui-elements.less
-
9customize.dist/src/less2/include/notifications.less
-
26customize.dist/src/less2/include/sidebar-layout.less
-
13customize.dist/src/less2/include/tokenfield.less
-
18customize.dist/src/less2/include/toolbar.less
-
23customize.dist/src/less2/include/usergrid.less
-
1customize.dist/src/less2/include/variables.less
-
2customize.dist/src/outer.css
-
14customize.dist/translations/messages.hi.js
-
14customize.dist/translations/messages.sv.js
-
28docs/cryptpad.service
-
11docs/example.nginx.conf
-
1011historyKeeper.js
-
65import
-
35lib/api.js
-
197lib/commands/admin-rpc.js
-
172lib/commands/block.js
-
275lib/commands/channel.js
-
149lib/commands/core.js
-
189lib/commands/metadata.js
-
298lib/commands/pin-rpc.js
-
107lib/commands/quota.js
-
89lib/commands/upload.js
-
11lib/deduplicate.js
-
86lib/defaults.js
-
285lib/historyKeeper.js
-
910lib/hk-util.js
-
26lib/load-config.js
-
13lib/log.js
-
217lib/metadata.js
-
7lib/once.js
-
185lib/pins.js
-
235lib/plan.js
-
216lib/rpc.js
-
172lib/schedule.js
-
628lib/storage/blob.js
-
1260lib/storage/file.js
-
397lib/storage/tasks.js
-
84lib/stream-file.js
-
576lib/workers/db-worker.js
-
367lib/workers/index.js
-
527package-lock.json
-
10package.json
-
1766rpc.js
-
3scripts/check-account-deletion.js
-
42scripts/compare-pin-methods.js
-
8scripts/diagnose-archive-conflicts.js
-
90scripts/evict-inactive.js
-
4scripts/expire-channels.js
-
4scripts/migrations/migrate-tasks-v1.js
-
5scripts/restore-archived.js
-
235scripts/tests/test-mailbox.js
-
46scripts/tests/test-pins.js
-
41scripts/tests/test-plan.js
-
183scripts/tests/test-rpc.js
-
220scripts/tests/test-scheduler.js
-
295server.js
-
59storage/README.md
-
628storage/blob.js
-
1053storage/file.js
-
413storage/tasks.js
-
24www/admin/app-admin.less
-
2www/admin/index.html
-
29www/admin/inner.js
-
42www/assert/main.js
-
41www/auth/main.js
-
33www/code/app-code.less
-
2www/code/index.html
-
117www/code/inner.js
-
750www/code/markers.js
-
4www/code/mermaid-new.css
-
59134www/code/mermaid.js
-
56www/code/mermaid.min.js
-
9www/common/application_config_internal.js
-
176www/common/common-hash.js
-
332www/common/common-interface.js
-
12www/common/common-messaging.js
-
1916www/common/common-ui-elements.js
-
74www/common/common-util.js
-
273www/common/cryptpad-common.js
@ -0,0 +1,45 @@ |
|||
--- |
|||
name: Bug report |
|||
about: Create a report to help us improve |
|||
title: '' |
|||
labels: '' |
|||
assignees: '' |
|||
|
|||
--- |
|||
|
|||
**Describe the bug** |
|||
A clear and concise description of what the bug is. |
|||
|
|||
**Where did it happen?** |
|||
Did the issue occur on CryptPad.fr or an instance hosted by a third-party? |
|||
If on another instance, please provide its full URL. |
|||
|
|||
**To Reproduce** |
|||
Steps to reproduce the behavior: |
|||
1. Go to '...' |
|||
2. Click on '....' |
|||
3. Scroll down to '....' |
|||
4. See error |
|||
|
|||
**Expected behavior** |
|||
A clear and concise description of what you expected to happen. |
|||
|
|||
**Screenshots** |
|||
If applicable, add screenshots to help explain your problem. |
|||
|
|||
**Browser (please complete the following information):** |
|||
- OS: [e.g. iOS] |
|||
- Browser [e.g. firefox, tor browser, chrome, safari, brave, edge, ???] |
|||
- variations [e.g. Firefox nightly, Firefox ESR, Chromium, Ungoogled chrome] |
|||
- Version [e.g. 22] |
|||
- Extensions installed (UBlock Origin, Passbolt, LibreJS] |
|||
- Browser tweaks [e.g. firefox "Enhanced Tracking Protection" strict/custom mode, tor browser "safer" security level, chrome incognito mode] |
|||
|
|||
**Smartphone (please complete the following information):** |
|||
- Device: [e.g. iPhone6] |
|||
- OS: [e.g. iOS8.1] |
|||
- Browser [e.g. stock browser, safari] |
|||
- Version [e.g. 22] |
|||
|
|||
**Additional context** |
|||
Add any other context about the problem here. |
|||
@ -0,0 +1,38 @@ |
|||
pipeline { |
|||
environment { |
|||
registry = 'https://registry.hub.docker.com' |
|||
registryCredential = 'dockerhub_jcabillot' |
|||
dockerImage = 'jcabillot/cryptpad' |
|||
} |
|||
|
|||
agent any |
|||
|
|||
triggers { |
|||
cron('@midnight') |
|||
} |
|||
|
|||
stages { |
|||
stage('Clone repository') { |
|||
steps{ |
|||
checkout scm |
|||
} |
|||
} |
|||
|
|||
stage('Build image') { |
|||
steps{ |
|||
sh 'docker build --force-rm=true --no-cache=true --pull -t ${dockerImage} .' |
|||
} |
|||
} |
|||
|
|||
stage('Deploy Image') { |
|||
steps{ |
|||
script { |
|||
withCredentials([usernamePassword(credentialsId: 'dockerhub_jcabillot', usernameVariable: 'DOCKER_USER', passwordVariable: 'DOCKER_PASS')]) { |
|||
sh 'docker login --username ${DOCKER_USER} --password ${DOCKER_PASS}' |
|||
sh 'docker push ${dockerImage}' |
|||
} |
|||
} |
|||
} |
|||
} |
|||
} |
|||
} |
|||
@ -0,0 +1,14 @@ |
|||
/* |
|||
* You can override the translation text using this file. |
|||
* The recommended method is to make a copy of this file (/customize.dist/translations/messages.{LANG}.js) |
|||
in a 'customize' directory (/customize/translations/messages.{LANG}.js). |
|||
* If you want to check all the existing translation keys, you can open the internal language file |
|||
but you should not change it directly (/common/translations/messages.{LANG}.js) |
|||
*/ |
|||
define(['/common/translations/messages.hi.js'], function (Messages) { |
|||
// Replace the existing keys in your copied file here:
|
|||
// Messages.button_newpad = "New Rich Text Document";
|
|||
|
|||
return Messages; |
|||
}); |
|||
|
|||
@ -0,0 +1,14 @@ |
|||
/* |
|||
* You can override the translation text using this file. |
|||
* The recommended method is to make a copy of this file (/customize.dist/translations/messages.{LANG}.js) |
|||
in a 'customize' directory (/customize/translations/messages.{LANG}.js). |
|||
* If you want to check all the existing translation keys, you can open the internal language file |
|||
but you should not change it directly (/common/translations/messages.{LANG}.js) |
|||
*/ |
|||
define(['/common/translations/messages.sv.js'], function (Messages) { |
|||
// Replace the existing keys in your copied file here:
|
|||
// Messages.button_newpad = "New Rich Text Document";
|
|||
|
|||
return Messages; |
|||
}); |
|||
|
|||
@ -0,0 +1,28 @@ |
|||
[Unit] |
|||
Description=CryptPad API server |
|||
|
|||
[Service] |
|||
ExecStart=/home/cryptpad/.nvm/versions/node/v12.14.0/bin/node /home/cryptpad/cryptpad/server.js |
|||
# modify to match the location of your cryptpad repository |
|||
WorkingDirectory=/home/cryptpad/cryptpad |
|||
|
|||
Restart=always |
|||
# Restart service after 10 seconds if node service crashes |
|||
RestartSec=2 |
|||
|
|||
# Output to syslog |
|||
StandardOutput=syslog |
|||
StandardError=syslog |
|||
SyslogIdentifier=cryptpad |
|||
User=cryptpad |
|||
Group=cryptpad |
|||
# modify to match your working directory |
|||
Environment='PWD="/home/cryptpad/cryptpad/cryptpad"' |
|||
|
|||
# systemd sets the open file limit to 4000 unless you override it |
|||
# cryptpad stores its data with the filesystem, so you should increase this to match the value of `ulimit -n` |
|||
# or risk EMFILE errors. |
|||
LimitNOFILE=1000000 |
|||
|
|||
[Install] |
|||
WantedBy=multi-user.target |
|||
1011
historyKeeper.js
File diff suppressed because it is too large
View File
File diff suppressed because it is too large
View File
@ -1,65 +0,0 @@ |
|||
#!/usr/bin/env node |
|||
/* globals process */ |
|||
|
|||
var Config = require("./config"); |
|||
var Fs = require("fs"); |
|||
var Storage = require(Config.storage); |
|||
|
|||
var args = process.argv.slice(2); |
|||
|
|||
if (!args.length) { |
|||
console.log("Insufficient arguments!"); |
|||
console.log("Pass a path to a database backup!"); |
|||
process.exit(); |
|||
} |
|||
|
|||
var dump = Fs.readFileSync(args[0], 'utf-8'); |
|||
|
|||
var ready = function (store) { |
|||
var lock = 0; |
|||
dump.split(/\n/) |
|||
.filter(function (line) { |
|||
return line; |
|||
}) |
|||
.forEach(function (line, i) { |
|||
lock++; |
|||
var parts; |
|||
|
|||
var channel; |
|||
var msg; |
|||
|
|||
line.replace(/^(.*?)\|(.*)$/, function (all, c, m) { |
|||
channel = c; |
|||
msg = m; |
|||
return ''; |
|||
}); |
|||
|
|||
if (!channel || !msg) { |
|||
console.log("BAD LINE on line %s", i); |
|||
return; |
|||
} |
|||
|
|||
try { |
|||
JSON.parse(msg); |
|||
} catch (err) { |
|||
console.log("BAD LINE on line %s", i); |
|||
console.log(msg); |
|||
console.log(); |
|||
} |
|||
|
|||
store.message(channel, msg, function () { |
|||
console.log(line); |
|||
lock--; |
|||
if (!lock) { |
|||
console.log("DONE"); |
|||
process.exit(0); |
|||
} |
|||
}); |
|||
}); |
|||
}; |
|||
|
|||
Storage.create(Config, function (store) { |
|||
console.log("READY"); |
|||
ready(store); |
|||
}); |
|||
|
|||
@ -0,0 +1,35 @@ |
|||
/* jshint esversion: 6 */ |
|||
const WebSocketServer = require('ws').Server; |
|||
const NetfluxSrv = require('chainpad-server'); |
|||
|
|||
module.exports.create = function (config) { |
|||
// asynchronously create a historyKeeper and RPC together
|
|||
require('./historyKeeper.js').create(config, function (err, historyKeeper) { |
|||
if (err) { throw err; } |
|||
|
|||
var log = config.log; |
|||
|
|||
// spawn ws server and attach netflux event handlers
|
|||
NetfluxSrv.create(new WebSocketServer({ server: config.httpServer})) |
|||
.on('channelClose', historyKeeper.channelClose) |
|||
.on('channelMessage', historyKeeper.channelMessage) |
|||
.on('channelOpen', historyKeeper.channelOpen) |
|||
.on('sessionClose', historyKeeper.sessionClose) |
|||
.on('error', function (error, label, info) { |
|||
if (!error) { return; } |
|||
if (['EPIPE', 'ECONNRESET'].indexOf(error && error.code) !== -1) { return; } |
|||
/* labels: |
|||
SEND_MESSAGE_FAIL, SEND_MESSAGE_FAIL_2, FAIL_TO_DISCONNECT, |
|||
FAIL_TO_TERMINATE, HANDLE_CHANNEL_LEAVE, NETFLUX_BAD_MESSAGE, |
|||
NETFLUX_WEBSOCKET_ERROR, NF_ENOENT |
|||
*/ |
|||
log.error(label, { |
|||
code: error.code, |
|||
message: error.message, |
|||
stack: error.stack, |
|||
info: info, |
|||
}); |
|||
}) |
|||
.register(historyKeeper.id, historyKeeper.directMessage); |
|||
}); |
|||
}; |
|||
@ -0,0 +1,197 @@ |
|||
/*jshint esversion: 6 */ |
|||
/* globals process */ |
|||
const nThen = require("nthen"); |
|||
const getFolderSize = require("get-folder-size"); |
|||
const Util = require("../common-util"); |
|||
const Ulimit = require("ulimit"); |
|||
|
|||
var Fs = require("fs"); |
|||
|
|||
var Admin = module.exports; |
|||
|
|||
var getFileDescriptorCount = function (Env, server, cb) { |
|||
Fs.readdir('/proc/self/fd', function(err, list) { |
|||
if (err) { return void cb(err); } |
|||
cb(void 0, list.length); |
|||
}); |
|||
}; |
|||
|
|||
var getFileDescriptorLimit = function (env, server, cb) { |
|||
Ulimit(cb); |
|||
}; |
|||
|
|||
var getCacheStats = function (env, server, cb) { |
|||
var metaSize = 0; |
|||
var channelSize = 0; |
|||
var metaCount = 0; |
|||
var channelCount = 0; |
|||
|
|||
try { |
|||
var meta = env.metadata_cache; |
|||
for (var x in meta) { |
|||
if (meta.hasOwnProperty(x)) { |
|||
metaCount++; |
|||
metaSize += JSON.stringify(meta[x]).length; |
|||
} |
|||
} |
|||
|
|||
var channels = env.channel_cache; |
|||
for (var y in channels) { |
|||
if (channels.hasOwnProperty(y)) { |
|||
channelCount++; |
|||
channelSize += JSON.stringify(channels[y]).length; |
|||
} |
|||
} |
|||
} catch (err) { |
|||
return void cb(err && err.message); |
|||
} |
|||
|
|||
cb(void 0, { |
|||
metadata: metaCount, |
|||
metaSize: metaSize, |
|||
channel: channelCount, |
|||
channelSize: channelSize, |
|||
memoryUsage: process.memoryUsage(), |
|||
}); |
|||
}; |
|||
|
|||
var getActiveSessions = function (Env, Server, cb) { |
|||
var stats = Server.getSessionStats(); |
|||
cb(void 0, [ |
|||
stats.total, |
|||
stats.unique |
|||
]); |
|||
}; |
|||
|
|||
var shutdown = function (Env, Server, cb) { |
|||
if (true) { |
|||
return void cb('E_NOT_IMPLEMENTED'); |
|||
} |
|||
|
|||
// disconnect all users and reject new connections
|
|||
Server.shutdown(); |
|||
|
|||
// stop all intervals that may be running
|
|||
Object.keys(Env.intervals).forEach(function (name) { |
|||
clearInterval(Env.intervals[name]); |
|||
}); |
|||
|
|||
// set a flag to prevent incoming database writes
|
|||
// wait until all pending writes are complete
|
|||
// then process.exit(0);
|
|||
// and allow system functionality to restart the server
|
|||
}; |
|||
|
|||
var getRegisteredUsers = function (Env, Server, cb) { |
|||
Env.batchRegisteredUsers('', cb, function (done) { |
|||
var dir = Env.paths.pin; |
|||
var folders; |
|||
var users = 0; |
|||
nThen(function (waitFor) { |
|||
Fs.readdir(dir, waitFor(function (err, list) { |
|||
if (err) { |
|||
waitFor.abort(); |
|||
return void done(err); |
|||
} |
|||
folders = list; |
|||
})); |
|||
}).nThen(function (waitFor) { |
|||
folders.forEach(function (f) { |
|||
var dir = Env.paths.pin + '/' + f; |
|||
Fs.readdir(dir, waitFor(function (err, list) { |
|||
if (err) { return; } |
|||
users += list.length; |
|||
})); |
|||
}); |
|||
}).nThen(function () { |
|||
done(void 0, users); |
|||
}); |
|||
}); |
|||
}; |
|||
|
|||
var getDiskUsage = function (Env, Server, cb) { |
|||
Env.batchDiskUsage('', cb, function (done) { |
|||
var data = {}; |
|||
nThen(function (waitFor) { |
|||
getFolderSize('./', waitFor(function(err, info) { |
|||
data.total = info; |
|||
})); |
|||
getFolderSize(Env.paths.pin, waitFor(function(err, info) { |
|||
data.pin = info; |
|||
})); |
|||
getFolderSize(Env.paths.blob, waitFor(function(err, info) { |
|||
data.blob = info; |
|||
})); |
|||
getFolderSize(Env.paths.staging, waitFor(function(err, info) { |
|||
data.blobstage = info; |
|||
})); |
|||
getFolderSize(Env.paths.block, waitFor(function(err, info) { |
|||
data.block = info; |
|||
})); |
|||
getFolderSize(Env.paths.data, waitFor(function(err, info) { |
|||
data.datastore = info; |
|||
})); |
|||
}).nThen(function () { |
|||
done(void 0, data); |
|||
}); |
|||
}); |
|||
}; |
|||
|
|||
var getActiveChannelCount = function (Env, Server, cb) { |
|||
cb(void 0, Server.getActiveChannelCount()); |
|||
}; |
|||
|
|||
var flushCache = function (Env, Server, cb) { |
|||
Env.flushCache(); |
|||
cb(void 0, true); |
|||
}; |
|||
|
|||
|
|||
// CryptPad_AsyncStore.rpc.send('ADMIN', ['SET_DEFAULT_STORAGE_LIMIT', 1024 * 1024 * 1024 /* 1GB */], console.log)
|
|||
var setDefaultStorageLimit = function (Env, Server, cb, data) { |
|||
var value = Array.isArray(data) && data[1]; |
|||
if (typeof(value) !== 'number' || value <= 0) { return void cb('EINVAL'); } |
|||
var previous = Env.defaultStorageLimit; |
|||
var change = { |
|||
previous: previous, |
|||
current: value, |
|||
}; |
|||
|
|||
Env.defaultStorageLimit = value; |
|||
Env.Log.info('DEFAULT_STORAGE_LIMIT_UPDATE', change); |
|||
|
|||
cb(void 0, change); |
|||
}; |
|||
|
|||
var commands = { |
|||
ACTIVE_SESSIONS: getActiveSessions, |
|||
ACTIVE_PADS: getActiveChannelCount, |
|||
REGISTERED_USERS: getRegisteredUsers, |
|||
DISK_USAGE: getDiskUsage, |
|||
FLUSH_CACHE: flushCache, |
|||
SHUTDOWN: shutdown, |
|||
GET_FILE_DESCRIPTOR_COUNT: getFileDescriptorCount, |
|||
GET_FILE_DESCRIPTOR_LIMIT: getFileDescriptorLimit, |
|||
SET_DEFAULT_STORAGE_LIMIT: setDefaultStorageLimit, |
|||
GET_CACHE_STATS: getCacheStats, |
|||
}; |
|||
|
|||
Admin.command = function (Env, safeKey, data, _cb, Server) { |
|||
var cb = Util.once(Util.mkAsync(_cb)); |
|||
|
|||
var admins = Env.admins; |
|||
//var unsafeKey = Util.unescapeKeyCharacters(safeKey);
|
|||
if (admins.indexOf(safeKey) === -1) { |
|||
return void cb("FORBIDDEN"); |
|||
} |
|||
|
|||
var command = commands[data[0]]; |
|||
|
|||
if (typeof(command) === 'function') { |
|||
return void command(Env, Server, cb, data); |
|||
} |
|||
|
|||
return void cb('UNHANDLED_ADMIN_COMMAND'); |
|||
}; |
|||
|
|||
|
|||
@ -0,0 +1,172 @@ |
|||
/*jshint esversion: 6 */ |
|||
/* globals Buffer*/ |
|||
var Block = module.exports; |
|||
|
|||
const Fs = require("fs"); |
|||
const Fse = require("fs-extra"); |
|||
const Path = require("path"); |
|||
const Nacl = require("tweetnacl/nacl-fast"); |
|||
const nThen = require("nthen"); |
|||
|
|||
const Util = require("../common-util"); |
|||
|
|||
/* |
|||
We assume that the server is secured against MitM attacks |
|||
via HTTPS, and that malicious actors do not have code execution |
|||
capabilities. If they do, we have much more serious problems. |
|||
|
|||
The capability to replay a block write or remove results in either |
|||
a denial of service for the user whose block was removed, or in the |
|||
case of a write, a rollback to an earlier password. |
|||
|
|||
Since block modification is destructive, this can result in loss |
|||
of access to the user's drive. |
|||
|
|||
So long as the detached signature is never observed by a malicious |
|||
party, and the server discards it after proof of knowledge, replays |
|||
are not possible. However, this precludes verification of the signature |
|||
at a later time. |
|||
|
|||
Despite this, an integrity check is still possible by the original |
|||
author of the block, since we assume that the block will have been |
|||
encrypted with xsalsa20-poly1305 which is authenticated. |
|||
*/ |
|||
var validateLoginBlock = function (Env, publicKey, signature, block, cb) { // FIXME BLOCKS
|
|||
// convert the public key to a Uint8Array and validate it
|
|||
if (typeof(publicKey) !== 'string') { return void cb('E_INVALID_KEY'); } |
|||
|
|||
var u8_public_key; |
|||
try { |
|||
u8_public_key = Nacl.util.decodeBase64(publicKey); |
|||
} catch (e) { |
|||
return void cb('E_INVALID_KEY'); |
|||
} |
|||
|
|||
var u8_signature; |
|||
try { |
|||
u8_signature = Nacl.util.decodeBase64(signature); |
|||
} catch (e) { |
|||
Env.Log.error('INVALID_BLOCK_SIGNATURE', e); |
|||
return void cb('E_INVALID_SIGNATURE'); |
|||
} |
|||
|
|||
// convert the block to a Uint8Array
|
|||
var u8_block; |
|||
try { |
|||
u8_block = Nacl.util.decodeBase64(block); |
|||
} catch (e) { |
|||
return void cb('E_INVALID_BLOCK'); |
|||
} |
|||
|
|||
// take its hash
|
|||
var hash = Nacl.hash(u8_block); |
|||
|
|||
// validate the signature against the hash of the content
|
|||
var verified = Nacl.sign.detached.verify(hash, u8_signature, u8_public_key); |
|||
|
|||
// existing authentication ensures that users cannot replay old blocks
|
|||
|
|||
// call back with (err) if unsuccessful
|
|||
if (!verified) { return void cb("E_COULD_NOT_VERIFY"); } |
|||
|
|||
return void cb(null, u8_block); |
|||
}; |
|||
|
|||
var createLoginBlockPath = function (Env, publicKey) { // FIXME BLOCKS
|
|||
// prepare publicKey to be used as a file name
|
|||
var safeKey = Util.escapeKeyCharacters(publicKey); |
|||
|
|||
// validate safeKey
|
|||
if (typeof(safeKey) !== 'string') { |
|||
return; |
|||
} |
|||
|
|||
// derive the full path
|
|||
// /home/cryptpad/cryptpad/block/fg/fg32kefksjdgjkewrjksdfksjdfsdfskdjfsfd
|
|||
return Path.join(Env.paths.block, safeKey.slice(0, 2), safeKey); |
|||
}; |
|||
|
|||
Block.writeLoginBlock = function (Env, safeKey, msg, cb) { // FIXME BLOCKS
|
|||
//console.log(msg);
|
|||
var publicKey = msg[0]; |
|||
var signature = msg[1]; |
|||
var block = msg[2]; |
|||
|
|||
validateLoginBlock(Env, publicKey, signature, block, function (e, validatedBlock) { |
|||
if (e) { return void cb(e); } |
|||
if (!(validatedBlock instanceof Uint8Array)) { return void cb('E_INVALID_BLOCK'); } |
|||
|
|||
// derive the filepath
|
|||
var path = createLoginBlockPath(Env, publicKey); |
|||
|
|||
// make sure the path is valid
|
|||
if (typeof(path) !== 'string') { |
|||
return void cb('E_INVALID_BLOCK_PATH'); |
|||
} |
|||
|
|||
var parsed = Path.parse(path); |
|||
if (!parsed || typeof(parsed.dir) !== 'string') { |
|||
return void cb("E_INVALID_BLOCK_PATH_2"); |
|||
} |
|||
|
|||
nThen(function (w) { |
|||
// make sure the path to the file exists
|
|||
Fse.mkdirp(parsed.dir, w(function (e) { |
|||
if (e) { |
|||
w.abort(); |
|||
cb(e); |
|||
} |
|||
})); |
|||
}).nThen(function () { |
|||
// actually write the block
|
|||
|
|||
// flow is dumb and I need to guard against this which will never happen
|
|||
/*:: if (typeof(validatedBlock) === 'undefined') { throw new Error('should never happen'); } */ |
|||
/*:: if (typeof(path) === 'undefined') { throw new Error('should never happen'); } */ |
|||
Fs.writeFile(path, Buffer.from(validatedBlock), { encoding: "binary", }, function (err) { |
|||
if (err) { return void cb(err); } |
|||
cb(); |
|||
}); |
|||
}); |
|||
}); |
|||
}; |
|||
|
|||
/* |
|||
When users write a block, they upload the block, and provide |
|||
a signature proving that they deserve to be able to write to |
|||
the location determined by the public key. |
|||
|
|||
When removing a block, there is nothing to upload, but we need |
|||
to sign something. Since the signature is considered sensitive |
|||
information, we can just sign some constant and use that as proof. |
|||
|
|||
*/ |
|||
Block.removeLoginBlock = function (Env, safeKey, msg, cb) { // FIXME BLOCKS
|
|||
var publicKey = msg[0]; |
|||
var signature = msg[1]; |
|||
var block = Nacl.util.decodeUTF8('DELETE_BLOCK'); // clients and the server will have to agree on this constant
|
|||
|
|||
validateLoginBlock(Env, publicKey, signature, block, function (e /*::, validatedBlock */) { |
|||
if (e) { return void cb(e); } |
|||
// derive the filepath
|
|||
var path = createLoginBlockPath(Env, publicKey); |
|||
|
|||
// make sure the path is valid
|
|||
if (typeof(path) !== 'string') { |
|||
return void cb('E_INVALID_BLOCK_PATH'); |
|||
} |
|||
|
|||
// FIXME COLDSTORAGE
|
|||
Fs.unlink(path, function (err) { |
|||
Env.Log.info('DELETION_BLOCK_BY_OWNER_RPC', { |
|||
publicKey: publicKey, |
|||
path: path, |
|||
status: err? String(err): 'SUCCESS', |
|||
}); |
|||
|
|||
if (err) { return void cb(err); } |
|||
cb(); |
|||
}); |
|||
}); |
|||
}; |
|||
|
|||
@ -0,0 +1,275 @@ |
|||
/*jshint esversion: 6 */ |
|||
const Channel = module.exports; |
|||
|
|||
const Util = require("../common-util"); |
|||
const nThen = require("nthen"); |
|||
const Core = require("./core"); |
|||
const Metadata = require("./metadata"); |
|||
const HK = require("../hk-util"); |
|||
|
|||
Channel.clearOwnedChannel = function (Env, safeKey, channelId, cb, Server) { |
|||
if (typeof(channelId) !== 'string' || channelId.length !== 32) { |
|||
return cb('INVALID_ARGUMENTS'); |
|||
} |
|||
var unsafeKey = Util.unescapeKeyCharacters(safeKey); |
|||
|
|||
Metadata.getMetadata(Env, channelId, function (err, metadata) { |
|||
if (err) { return void cb(err); } |
|||
if (!Core.hasOwners(metadata)) { return void cb('E_NO_OWNERS'); } |
|||
// Confirm that the channel is owned by the user in question
|
|||
if (!Core.isOwner(metadata, unsafeKey)) { |
|||
return void cb('INSUFFICIENT_PERMISSIONS'); |
|||
} |
|||
return void Env.msgStore.clearChannel(channelId, function (e) { |
|||
if (e) { return void cb(e); } |
|||
cb(); |
|||
|
|||
const channel_cache = Env.channel_cache; |
|||
|
|||
const clear = function () { |
|||
// delete the channel cache because it will have been invalidated
|
|||
delete channel_cache[channelId]; |
|||
}; |
|||
|
|||
nThen(function (w) { |
|||
Server.getChannelUserList(channelId).forEach(function (userId) { |
|||
Server.send(userId, [ |
|||
0, |
|||
Env.historyKeeper.id, |
|||
'MSG', |
|||
userId, |
|||
JSON.stringify({ |
|||
error: 'ECLEARED', |
|||
channel: channelId |
|||
}) |
|||
], w()); |
|||
}); |
|||
}).nThen(function () { |
|||
clear(); |
|||
}).orTimeout(function () { |
|||
Env.Log.warn("ON_CHANNEL_CLEARED_TIMEOUT", channelId); |
|||
clear(); |
|||
}, 30000); |
|||
}); |
|||
}); |
|||
}; |
|||
|
|||
var archiveOwnedChannel = function (Env, safeKey, channelId, cb, Server) { |
|||
var unsafeKey = Util.unescapeKeyCharacters(safeKey); |
|||
Metadata.getMetadata(Env, channelId, function (err, metadata) { |
|||
if (err) { return void cb(err); } |
|||
if (!Core.hasOwners(metadata)) { return void cb('E_NO_OWNERS'); } |
|||
if (!Core.isOwner(metadata, unsafeKey)) { |
|||
return void cb('INSUFFICIENT_PERMISSIONS'); |
|||
} |
|||
// temporarily archive the file
|
|||
return void Env.msgStore.archiveChannel(channelId, function (e) { |
|||
Env.Log.info('ARCHIVAL_CHANNEL_BY_OWNER_RPC', { |
|||
unsafeKey: unsafeKey, |
|||
channelId: channelId, |
|||
status: e? String(e): 'SUCCESS', |
|||
}); |
|||
if (e) { |
|||
return void cb(e); |
|||
} |
|||
cb(void 0, 'OK'); |
|||
|
|||
const channel_cache = Env.channel_cache; |
|||
const metadata_cache = Env.metadata_cache; |
|||
|
|||
const clear = function () { |
|||
delete channel_cache[channelId]; |
|||
Server.clearChannel(channelId); |
|||
delete metadata_cache[channelId]; |
|||
}; |
|||
|
|||
// an owner of a channel deleted it
|
|||
nThen(function (w) { |
|||
// close the channel in the store
|
|||
Env.msgStore.closeChannel(channelId, w()); |
|||
}).nThen(function (w) { |
|||
// Server.channelBroadcast would be better
|
|||
// but we can't trust it to track even one callback,
|
|||
// let alone many in parallel.
|
|||
// so we simulate it on this side to avoid race conditions
|
|||
Server.getChannelUserList(channelId).forEach(function (userId) { |
|||
Server.send(userId, [ |
|||
0, |
|||
Env.historyKeeper.id, |
|||
"MSG", |
|||
userId, |
|||
JSON.stringify({ |
|||
error: 'EDELETED', |
|||
channel: channelId, |
|||
}) |
|||
], w()); |
|||
}); |
|||
}).nThen(function () { |
|||
// clear the channel's data from memory
|
|||
// once you've sent everyone a notice that the channel has been deleted
|
|||
clear(); |
|||
}).orTimeout(function () { |
|||
Env.Log.warn('ON_CHANNEL_DELETED_TIMEOUT', channelId); |
|||
clear(); |
|||
}, 30000); |
|||
}); |
|||
}); |
|||
}; |
|||
|
|||
Channel.removeOwnedChannel = function (Env, safeKey, channelId, __cb, Server) { |
|||
var _cb = Util.once(Util.mkAsync(__cb)); |
|||
|
|||
if (typeof(channelId) !== 'string' || !Core.isValidId(channelId)) { |
|||
return _cb('INVALID_ARGUMENTS'); |
|||
} |
|||
|
|||
// archiving large channels or files can be expensive, so do it one at a time
|
|||
// for any given user to ensure that nobody can use too much of the server's resources
|
|||
Env.queueDeletes(safeKey, function (next) { |
|||
var cb = Util.both(_cb, next); |
|||
if (Env.blobStore.isFileId(channelId)) { |
|||
return void Env.removeOwnedBlob(channelId, safeKey, cb); |
|||
} |
|||
archiveOwnedChannel(Env, safeKey, channelId, cb, Server); |
|||
}); |
|||
}; |
|||
|
|||
Channel.trimHistory = function (Env, safeKey, data, cb) { |
|||
if (!(data && typeof(data.channel) === 'string' && typeof(data.hash) === 'string' && data.hash.length === 64)) { |
|||
return void cb('INVALID_ARGS'); |
|||
} |
|||
|
|||
var channelId = data.channel; |
|||
var unsafeKey = Util.unescapeKeyCharacters(safeKey); |
|||
var hash = data.hash; |
|||
|
|||
nThen(function (w) { |
|||
Metadata.getMetadata(Env, channelId, w(function (err, metadata) { |
|||
if (err) { return void cb(err); } |
|||
if (!Core.hasOwners(metadata)) { |
|||
w.abort(); |
|||
return void cb('E_NO_OWNERS'); |
|||
} |
|||
if (!Core.isOwner(metadata, unsafeKey)) { |
|||
w.abort(); |
|||
return void cb("INSUFFICIENT_PERMISSIONS"); |
|||
} |
|||
// else fall through to the next block
|
|||
})); |
|||
}).nThen(function () { |
|||
Env.msgStore.trimChannel(channelId, hash, function (err) { |
|||
if (err) { return void cb(err); } |
|||
// clear historyKeeper's cache for this channel
|
|||
Env.historyKeeper.channelClose(channelId); |
|||
cb(void 0, 'OK'); |
|||
delete Env.channel_cache[channelId]; |
|||
delete Env.metadata_cache[channelId]; |
|||
}); |
|||
}); |
|||
}; |
|||
|
|||
var ARRAY_LINE = /^\[/; |
|||
|
|||
/* Files can contain metadata but not content |
|||
call back with true if the channel log has no content other than metadata |
|||
otherwise false |
|||
*/ |
|||
Channel.isNewChannel = function (Env, channel, cb) { |
|||
if (!Core.isValidId(channel)) { return void cb('INVALID_CHAN'); } |
|||
if (channel.length !== 32) { return void cb('INVALID_CHAN'); } |
|||
|
|||
// TODO replace with readMessagesBin
|
|||
var done = false; |
|||
Env.msgStore.getMessages(channel, function (msg) { |
|||
if (done) { return; } |
|||
try { |
|||
if (typeof(msg) === 'string' && ARRAY_LINE.test(msg)) { |
|||
done = true; |
|||
return void cb(void 0, false); |
|||
} |
|||
} catch (e) { |
|||
Env.WARN('invalid message read from store', e); |
|||
} |
|||
}, function () { |
|||
if (done) { return; } |
|||
// no more messages...
|
|||
cb(void 0, true); |
|||
}); |
|||
}; |
|||
|
|||
/* writePrivateMessage |
|||
allows users to anonymously send a message to the channel |
|||
prevents their netflux-id from being stored in history |
|||
and from being broadcast to anyone that might currently be in the channel |
|||
|
|||
Otherwise behaves the same as sending to a channel |
|||
*/ |
|||
Channel.writePrivateMessage = function (Env, args, _cb, Server, netfluxId) { |
|||
var cb = Util.once(Util.mkAsync(_cb)); |
|||
|
|||
var channelId = args[0]; |
|||
var msg = args[1]; |
|||
|
|||
// don't bother handling empty messages
|
|||
if (!msg) { return void cb("INVALID_MESSAGE"); } |
|||
|
|||
// don't support anything except regular channels
|
|||
if (!Core.isValidId(channelId) || channelId.length !== 32) { |
|||
return void cb("INVALID_CHAN"); |
|||
} |
|||
|
|||
// We expect a modern netflux-websocket-server instance
|
|||
// if this API isn't here everything will fall apart anyway
|
|||
if (!(Server && typeof(Server.send) === 'function')) { |
|||
return void cb("NOT_IMPLEMENTED"); |
|||
} |
|||
|
|||
nThen(function (w) { |
|||
Metadata.getMetadataRaw(Env, channelId, w(function (err, metadata) { |
|||
if (err) { |
|||
w.abort(); |
|||
Env.Log.error('HK_WRITE_PRIVATE_MESSAGE', err); |
|||
return void cb('METADATA_ERR'); |
|||
} |
|||
|
|||
if (!metadata || !metadata.restricted) { |
|||
return; |
|||
} |
|||
|
|||
var session = HK.getNetfluxSession(Env, netfluxId); |
|||
var allowed = HK.listAllowedUsers(metadata); |
|||
|
|||
if (HK.isUserSessionAllowed(allowed, session)) { return; } |
|||
|
|||
w.abort(); |
|||
cb('INSUFFICIENT_PERMISSIONS'); |
|||
})); |
|||
}).nThen(function () { |
|||
// historyKeeper expects something with an 'id' attribute
|
|||
// it will fail unless you provide it, but it doesn't need anything else
|
|||
var channelStruct = { |
|||
id: channelId, |
|||
}; |
|||
|
|||
// construct a message to store and broadcast
|
|||
var fullMessage = [ |
|||
0, // idk
|
|||
null, // normally the netflux id, null isn't rejected, and it distinguishes messages written in this way
|
|||
"MSG", // indicate that this is a MSG
|
|||
channelId, // channel id
|
|||
msg // the actual message content. Generally a string
|
|||
]; |
|||
|
|||
|
|||
// historyKeeper already knows how to handle metadata and message validation, so we just pass it off here
|
|||
// if the message isn't valid it won't be stored.
|
|||
Env.historyKeeper.channelMessage(Server, channelStruct, fullMessage); |
|||
|
|||
Server.getChannelUserList(channelId).forEach(function (userId) { |
|||
Server.send(userId, fullMessage); |
|||
}); |
|||
|
|||
cb(); |
|||
}); |
|||
}; |
|||
|
|||
@ -0,0 +1,149 @@ |
|||
/*jshint esversion: 6 */ |
|||
/* globals process */ |
|||
const Core = module.exports; |
|||
const Util = require("../common-util"); |
|||
const escapeKeyCharacters = Util.escapeKeyCharacters; |
|||
//const { fork } = require('child_process');
|
|||
|
|||
Core.DEFAULT_LIMIT = 50 * 1024 * 1024; |
|||
Core.SESSION_EXPIRATION_TIME = 60 * 1000; |
|||
|
|||
/* isValidId
   A channel identifier is a base64-ish string (letters, digits, '=', '+', '-')
   of exactly 32 characters (regular channels) or 48 characters (blob ids). */
Core.isValidId = function (chan) {
    return chan && chan.length &&
        [32, 48].indexOf(chan.length) > -1 &&
        /^[a-zA-Z0-9=+-]*$/.test(chan);
};
|||
|
|||
// Generate a random hexadecimal session token.
// NOTE(review): Math.random is not cryptographically secure — presumably
// acceptable here because tokens are scoped to short-lived sessions; confirm.
var makeToken = Core.makeToken = function () {
    var rand = Math.floor(Math.random() * Number.MAX_SAFE_INTEGER);
    return Number(rand).toString(16);
};
|||
|
|||
/* makeCookie
   Build a cookie triplet [time, pid, token]. The timestamp is rounded
   down to a five-second boundary, and the process id lets the server
   reject cookies issued by a different worker process. */
Core.makeCookie = function (token) {
    var now = +new Date();
    var rounded = now - (now % 5000);

    return [
        rounded,
        process.pid,
        token
    ];
};
|||
|
|||
/* parseCookie
   Parse a 'time|pid|token' cookie string into { time, pid, seq }.
   Returns null for anything that doesn't split into exactly three parts.

   BUGFIX: the time component is a millisecond timestamp (see makeCookie),
   but it was previously passed to `new Date(string)`, which date-parses the
   string and yields an Invalid Date. Arithmetic against an Invalid Date is
   NaN, so the staleness check in isValidCookie could never reject a cookie.
   Parsing it as a Number restores the age check (and NaN time now fails the
   `!parsed.time` guard in isValidCookie). */
var parseCookie = function (cookie) {
    if (!(cookie && cookie.split)) { return null; }

    var parts = cookie.split('|');
    if (parts.length !== 3) { return null; }

    var c = {};
    c.time = Number(parts[0]);
    c.pid = Number(parts[1]);
    c.seq = parts[2];
    return c;
};
|||
|
|||
/* getSession
   Return the cached session for a user's public key, refreshing its
   access time; create a fresh session (with one initial token) when
   none exists yet. */
Core.getSession = function (Sessions, key) {
    var safeKey = escapeKeyCharacters(key);
    var existing = Sessions[safeKey];
    if (existing) {
        // touch the access time so the reaper doesn't expire it
        existing.atime = +new Date();
        return existing;
    }
    var fresh = {
        atime: +new Date(),
        tokens: [makeToken()],
    };
    Sessions[safeKey] = fresh;
    return fresh;
};
|||
|
|||
/* expireSession
   Drop a user's cached session, closing any in-progress blob upload
   stream before removing the entry. No-op if no session exists. */
Core.expireSession = function (Sessions, safeKey) {
    var session = Sessions[safeKey];
    if (!session) { return; }
    // release the staging file handle if an upload was left open
    if (session.blobstage) { session.blobstage.close(); }
    delete Sessions[safeKey];
};
|||
|
|||
// Asynchronous wrapper around expireSession: defers the expiry to the
// next tick and always calls back with 'OK'.
Core.expireSessionAsync = function (Env, safeKey, cb) {
    setTimeout(function () {
        Core.expireSession(Env.Sessions, safeKey);
        cb(void 0, 'OK');
    }, 0);
};
|||
|
|||
// A timestamp is "too old" once it is more than five minutes behind `now`.
var isTooOld = function (time, now) {
    var FIVE_MINUTES = 300000;
    return (now - time) > FIVE_MINUTES;
};
|||
|
|||
// Reap every cached session that has been idle for longer than the
// staleness window (see isTooOld).
Core.expireSessions = function (Sessions) {
    var now = +new Date();
    Object.keys(Sessions)
        .filter(function (safeKey) {
            var session = Sessions[safeKey];
            return Boolean(session) && isTooOld(session.atime, now);
        })
        .forEach(function (safeKey) {
            Core.expireSession(Sessions, safeKey);
        });
};
|||
|
|||
/* addTokenForKey
   Append a freshly issued token to an existing user's session and
   refresh its access time, retaining at most the two newest tokens.
   Throws if the user has no session at all. */
var addTokenForKey = function (Sessions, publicKey, token) {
    if (!Sessions[publicKey]) { throw new Error('undefined user'); }

    var user = Core.getSession(Sessions, publicKey);
    user.tokens.push(token);
    user.atime = +new Date();
    // keep only the two most recent tokens
    while (user.tokens.length > 2) { user.tokens.shift(); }
};
|||
|
|||
/* isValidCookie
   Validate a cookie presented with an authenticated RPC:
   - it must parse into { time, pid, seq }
   - its timestamp must be present and recent (see isTooOld)
   - it must have been issued by this server process
   - its token must be one of the user's (at most two) active tokens
   When the client used the older of the two tokens, a fresh token is
   rotated into their session. Returns a boolean. */
Core.isValidCookie = function (Sessions, publicKey, cookie) {
    var parsed = parseCookie(cookie);
    if (!parsed) { return false; }

    var now = +new Date();

    // reject missing or stale timestamps
    if (!parsed.time) { return false; }
    if (isTooOld(parsed.time, now)) { return false; }

    // different process. try harder
    if (process.pid !== parsed.pid) { return false; }

    var user = Core.getSession(Sessions, publicKey);
    if (!user) { return false; }

    var idx = user.tokens.indexOf(parsed.seq);
    if (idx === -1) { return false; }

    // the client presented their older token: rotate in a new one
    if (idx > 0) {
        addTokenForKey(Sessions, publicKey, Core.makeToken());
    }

    return true;
};
|||
|
|||
// E_NO_OWNERS
|
|||
// true when the metadata carries an owners array (even an empty one)
Core.hasOwners = function (metadata) {
    return Boolean(metadata) && Array.isArray(metadata.owners);
};

// true when the metadata carries a pending_owners array
Core.hasPendingOwners = function (metadata) {
    return Boolean(metadata) && Array.isArray(metadata.pending_owners);
};
|||
|
|||
// INSUFFICIENT_PERMISSIONS
|
|||
// membership tests; callers are expected to gate on hasOwners /
// hasPendingOwners first, since these assume the arrays exist
Core.isOwner = function (metadata, unsafeKey) {
    return metadata.owners.indexOf(unsafeKey) > -1;
};

Core.isPendingOwner = function (metadata, unsafeKey) {
    return metadata.pending_owners.indexOf(unsafeKey) > -1;
};
|||
|
|||
// Trivial authenticated RPC: reaching this handler means the cookie was
// already validated upstream, so simply acknowledge with no payload.
Core.haveACookie = function (Env, safeKey, cb) {
    cb();
};
|||
|
|||
@ -0,0 +1,189 @@ |
|||
/*jshint esversion: 6 */ |
|||
const Data = module.exports; |
|||
|
|||
const Meta = require("../metadata"); |
|||
const WriteQueue = require("../write-queue"); |
|||
const Core = require("./core"); |
|||
const Util = require("../common-util"); |
|||
const HK = require("../hk-util"); |
|||
|
|||
/* getMetadataRaw
   Fetch a channel's metadata without any permission filtering,
   preferring the in-memory cache and merging concurrent reads of the
   same channel via Env.batchMetadata. */
Data.getMetadataRaw = function (Env, channel /* channelName */, _cb) {
    const cb = Util.once(Util.mkAsync(_cb));

    // reject malformed ids and ids that aren't regular channels
    if (!Core.isValidId(channel)) { return void cb('INVALID_CHAN'); }
    if (channel.length !== HK.STANDARD_CHANNEL_LENGTH) {
        return void cb("INVALID_CHAN_LENGTH");
    }

    // serve from the cache when it holds a valid metadata message
    const cached = Env.metadata_cache[channel];
    if (HK.isMetadataMessage(cached)) {
        return void cb(void 0, cached);
    }

    // otherwise compute it, deduplicating concurrent requests
    Env.batchMetadata(channel, cb, function (done) {
        Env.computeMetadata(channel, done);
    });
};
|||
|
|||
/* getMetadata
   Like getMetadataRaw, but enforces allow lists: when the channel is
   restricted and the caller's netflux session is not on the list, only
   the restriction status and the allow list are revealed (with
   rejected: true) rather than the full metadata. */
Data.getMetadata = function (Env, channel, cb, Server, netfluxId) {
    Data.getMetadataRaw(Env, channel, function (err, metadata) {
        if (err) { return void cb(err); }

        // unrestricted channels expose their full metadata
        var restricted = metadata && metadata.restricted;
        if (!restricted) { return void cb(void 0, metadata); }

        const session = HK.getNetfluxSession(Env, netfluxId);
        const allowed = HK.listAllowedUsers(metadata);

        if (HK.isUserSessionAllowed(allowed, session)) {
            return void cb(void 0, metadata);
        }

        // rejected: reveal only enough for the client to request access
        cb(void 0, {
            restricted: metadata.restricted,
            allowed: allowed,
            rejected: true,
        });
    });
};
|||
|
|||
/* setMetadata
    - write a new line to the metadata log if a valid command is provided
    - data is an object: {
        channel: channelId,
        command: metadataCommand (string),
        value: value
    }
    Permission rules:
    - pending owners (who are not yet owners) may only ADD_OWNERS or
      RM_PENDING_OWNERS, and only for themselves
    - everyone else must already be an owner
    On success, calls back with the updated metadata and broadcasts it to
    the channel; on a restricted channel, users not on the allow list are
    sent an ERESTRICTED error and removed from the channel.
*/
var queueMetadata = WriteQueue();
Data.setMetadata = function (Env, safeKey, data, cb, Server) {
    var unsafeKey = Util.unescapeKeyCharacters(safeKey);

    var channel = data.channel;
    var command = data.command;
    if (!channel || !Core.isValidId(channel)) { return void cb('INVALID_CHAN'); }
    if (!command || typeof (command) !== 'string') { return void cb('INVALID_COMMAND'); }
    if (Meta.commands.indexOf(command) === -1) { return void cb('UNSUPPORTED_COMMAND'); }

    // serialize writes per channel so concurrent commands can't interleave
    queueMetadata(channel, function (next) {
        Data.getMetadataRaw(Env, channel, function (err, metadata) {
            if (err) {
                cb(err);
                return void next();
            }
            if (!Core.hasOwners(metadata)) {
                cb('E_NO_OWNERS');
                return void next();
            }

            // Confirm that the channel is owned by the user in question
            // or the user is accepting a pending ownership offer
            if (Core.hasPendingOwners(metadata) &&
                Core.isPendingOwner(metadata, unsafeKey) &&
                !Core.isOwner(metadata, unsafeKey)) {

                // If you are a pending owner, make sure you can only add yourself as an owner
                if ((command !== 'ADD_OWNERS' && command !== 'RM_PENDING_OWNERS')
                    || !Array.isArray(data.value)
                    || data.value.length !== 1
                    || data.value[0] !== unsafeKey) {
                    cb('INSUFFICIENT_PERMISSIONS');
                    return void next();
                }
                // FIXME wacky fallthrough is hard to read
                // we could pass this off to a writeMetadataCommand function
                // and make the flow easier to follow
            } else if (!Core.isOwner(metadata, unsafeKey)) {
                cb('INSUFFICIENT_PERMISSIONS');
                return void next();
            }

            // Add the new metadata line
            var line = [command, data.value, +new Date()];
            var changed = false;
            try {
                changed = Meta.handleCommand(metadata, line);
            } catch (e) {
                cb(e);
                return void next();
            }

            // if your command is valid but it didn't result in any change to the metadata,
            // call back now and don't write any "useless" line to the log
            if (!changed) {
                cb(void 0, metadata);
                return void next();
            }
            Env.msgStore.writeMetadata(channel, JSON.stringify(line), function (e) {
                if (e) {
                    cb(e);
                    return void next();
                }

                // send the message back to the person who changed it
                // since we know they're allowed to see it
                cb(void 0, metadata);
                next();

                // update the cached metadata
                Env.metadata_cache[channel] = metadata;

                // it's easy to check if the channel is restricted
                const isRestricted = metadata.restricted;
                // and these values will be used in any case
                const s_metadata = JSON.stringify(metadata);
                const hk_id = Env.historyKeeper.id;

                if (!isRestricted) {
                    // pre-allow-list behaviour
                    // if it's not restricted, broadcast the new metadata to everyone
                    return void Server.channelBroadcast(channel, s_metadata, hk_id);
                }

                // otherwise derive the list of users (unsafeKeys) that are allowed to stay
                const allowed = HK.listAllowedUsers(metadata);
                // anyone who is not allowed will get the same error message
                const s_error = JSON.stringify({
                    error: 'ERESTRICTED',
                    channel: channel,
                });

                // iterate over the channel's userlist
                const toRemove = [];
                Server.getChannelUserList(channel).forEach(function (userId) {
                    const session = HK.getNetfluxSession(Env, userId);

                    // if the user is allowed to remain, send them the metadata
                    if (HK.isUserSessionAllowed(allowed, session)) {
                        return void Server.send(userId, [
                            0,
                            hk_id,
                            'MSG',
                            userId,
                            s_metadata
                        ], function () {});
                    }
                    // otherwise they are not in the list:
                    // send them an error and mark them for removal
                    Server.send(userId, [
                        0,
                        hk_id,
                        'MSG',
                        userId,
                        s_error
                    ], function () {});
                    // BUGFIX: toRemove was previously never populated, so
                    // disallowed users were never actually kicked from the
                    // channel despite receiving the error
                    toRemove.push(userId);
                });

                Server.removeFromChannel(channel, toRemove);
            });
        });
    });
};
|||
@ -0,0 +1,298 @@ |
|||
/*jshint esversion: 6 */ |
|||
const Core = require("./core"); |
|||
|
|||
const Pinning = module.exports; |
|||
const Util = require("../common-util"); |
|||
const nThen = require("nthen"); |
|||
|
|||
//const escapeKeyCharacters = Util.escapeKeyCharacters;
|
|||
const unescapeKeyCharacters = Util.unescapeKeyCharacters; |
|||
|
|||
/* sumChannelSizes
   Given a map of channelId -> size, return the total of all sizes
   that are strictly positive numbers; everything else is ignored. */
var sumChannelSizes = function (sizes) {
    var total = 0;
    Object.keys(sizes).forEach(function (id) {
        var size = sizes[id];
        // only count positive numeric sizes
        if (typeof(size) === 'number' && size > 0) { total += size; }
    });
    return total;
};
|||
|
|||
// FIXME it's possible for this to respond before the server has had a chance
// to fetch the limits. Maybe we should respond with an error...
// or wait until we actually know the limits before responding
/* getLimit
   Respond with [limitInBytes, planName, note] for the given user,
   falling back to the instance's default storage limit (with empty
   plan and note) when no custom limit applies. */
var getLimit = Pinning.getLimit = function (Env, safeKey, cb) {
    var unsafeKey = unescapeKeyCharacters(safeKey);
    var limit = Env.limits[unsafeKey];

    // instance-wide fallback
    var fallback = Core.DEFAULT_LIMIT;
    if (typeof(Env.defaultStorageLimit) === 'number') {
        fallback = Env.defaultStorageLimit;
    }

    if (limit && typeof(limit.limit) === "number") {
        return void cb(void 0, [limit.limit, limit.plan, limit.note]);
    }
    cb(void 0, [fallback, '', '']);
};
|||
|
|||
// Thin wrapper: delegate the multi-channel size lookup to the storage
// layer attached to Env.
var getMultipleFileSize = function (Env, channels, cb) {
    return void Env.getMultipleFileSize(channels, cb);
};
|||
|
|||
/* loadUserPins
   Load the user's pin state (a map of channel -> truthy when pinned),
   serving from the session cache when possible and batching concurrent
   loads for the same key. */
var loadUserPins = function (Env, safeKey, cb) {
    var session = Core.getSession(Env.Sessions, safeKey);

    // already cached by a previous load
    if (session.channels) { return cb(session.channels); }

    Env.batchUserPins(safeKey, cb, function (done) {
        Env.getPinState(safeKey, function (err, value) {
            if (!err) {
                // only put this into the cache if it completes
                session.channels = value;
            }
            done(value);
        });
    });
};
|||
|
|||
// Return the keys of O whose values are truthy; an empty array when O
// isn't key-enumerable (null, undefined, ...).
var truthyKeys = function (O) {
    try {
        var keys = Object.keys(O);
        return keys.filter(function (k) {
            return Boolean(O[k]);
        });
    } catch (err) {
        return [];
    }
};
|||
|
|||
// Resolve the list of channel ids the user currently has pinned.
var getChannelList = Pinning.getChannelList = function (Env, safeKey, _cb) {
    var cb = Util.once(Util.mkAsync(_cb));
    loadUserPins(Env, safeKey, function (pins) {
        // keep only channels whose pin state is truthy
        cb(truthyKeys(pins));
    });
};
|||
|
|||
/* getTotalSize
   Compute the total pinned size (in bytes) counted against a user's quota.
   When several users share one quota (limit.users), the union of all of
   their pinned channels is sized, with each channel counted only once.
   Concurrent requests for the same quota group are merged via
   Env.batchTotalSize. Calls back with (error) or (undefined, totalBytes). */
Pinning.getTotalSize = function (Env, safeKey, cb) {
    var unsafeKey = unescapeKeyCharacters(safeKey);
    var limit = Env.limits[unsafeKey];

    // Get a common key if multiple users share the same quota, otherwise take the public key
    var batchKey = (limit && Array.isArray(limit.users)) ? limit.users.join('') : safeKey;

    Env.batchTotalSize(batchKey, cb, function (done) {
        var channels = [];

        // deduplicating accumulator: a channel pinned by several quota
        // members must only be counted once
        var addUnique = function (channel) {
            if (channels.indexOf(channel) !== -1) { return; }
            channels.push(channel);
        };

        nThen(function (waitFor) {
            // Get the channels list for our user account
            getChannelList(Env, safeKey, waitFor(function (_channels) {
                if (!_channels) {
                    // a missing pin list for the requesting user is fatal
                    waitFor.abort();
                    return done('INVALID_PIN_LIST');
                }
                _channels.forEach(addUnique);
            }));
            // Get the channels list for users sharing our quota
            // NOTE(review): limit.users entries look like unsafe keys while
            // getChannelList expects a safeKey — confirm against the callers
            if (limit && Array.isArray(limit.users) && limit.users.length > 1) {
                limit.users.forEach(function (key) {
                    if (key === unsafeKey) { return; } // Don't count ourselves twice
                    getChannelList(Env, key, waitFor(function (_channels) {
                        if (!_channels) { return; } // Broken user, don't count their quota
                        _channels.forEach(addUnique);
                    }));
                });
            }
        }).nThen(function () {
            // sum the sizes of every unique channel collected above
            Env.getTotalSize(channels, done);
        });
    });
};
|||
|
|||
/* Users should be able to clear their own pin log with an authenticated RPC
*/
Pinning.removePins = function (Env, safeKey, cb) {
    Env.pinStore.removeChannel(safeKey, function (err) {
        // log the outcome whether or not the deletion succeeded
        Env.Log.info('DELETION_PIN_BY_OWNER_RPC', {
            safeKey: safeKey,
            status: err ? String(err) : 'SUCCESS',
        });

        if (err) { return void cb(err); }
        cb(void 0, 'OK');
    });
};
|||
|
|||
// Placeholder RPC: compacting a user's pin log is not supported yet.
Pinning.trimPins = function (Env, safeKey, cb) {
    cb("NOT_IMPLEMENTED");
};
|||
|
|||
/* getFreeSpace
   Remaining quota in bytes: the user's limit minus their current total
   pinned size. Errors from either lookup propagate to the callback. */
var getFreeSpace = Pinning.getFreeSpace = function (Env, safeKey, cb) {
    getLimit(Env, safeKey, function (e, limit) {
        if (e) { return void cb(e); }
        Pinning.getTotalSize(Env, safeKey, function (e, size) {
            if (typeof(size) === 'undefined') { return void cb(e); }

            var remaining = limit[0] - size;
            // guard against a malformed limit response
            if (typeof(remaining) !== 'number') {
                return void cb('invalid_response');
            }
            cb(void 0, remaining);
        });
    });
};
|||
|
|||
// Hash the user's full pin list; clients compare this value against a
// locally computed hash to detect drift from the server's pin log.
var getHash = Pinning.getHash = function (Env, safeKey, cb) {
    getChannelList(Env, safeKey, function (channels) {
        Env.hashChannelList(channels, cb);
    });
};
|||
|
|||
/* pinChannel
   Pin a list of channels to the user's account, charging their quota.
   Channels already pinned are skipped; if nothing new remains, the
   current pin-list hash is returned immediately. Calls back with
   (error) or (undefined, hash). */
Pinning.pinChannel = function (Env, safeKey, channels, cb) {
    // BUGFIX: the original guard (`!channels && channels.filter`) could never
    // reject anything and threw a TypeError on null/undefined input instead
    // of calling back with an error
    if (!Array.isArray(channels)) {
        return void cb('INVALID_PIN_LIST');
    }

    // get channel list ensures your session has a cached channel list
    getChannelList(Env, safeKey, function (pinned) {
        var session = Core.getSession(Env.Sessions, safeKey);

        // only pin channels which are not already pinned
        var toStore = channels.filter(function (channel) {
            return pinned.indexOf(channel) === -1;
        });

        // nothing new to pin: respond with the current hash
        if (toStore.length === 0) {
            return void getHash(Env, safeKey, cb);
        }

        getMultipleFileSize(Env, toStore, function (e, sizes) {
            if (typeof(sizes) === 'undefined') { return void cb(e); }
            var pinSize = sumChannelSizes(sizes);

            getFreeSpace(Env, safeKey, function (e, free) {
                if (typeof(free) === 'undefined') {
                    Env.WARN('getFreeSpace', e);
                    return void cb(e);
                }
                // reject pins which would exceed the user's quota
                if (pinSize > free) { return void cb('E_OVER_LIMIT'); }

                Env.pinStore.message(safeKey, JSON.stringify(['PIN', toStore, +new Date()]),
                    function (e) {
                    if (e) { return void cb(e); }
                    // update the in-memory pin cache only after the log write succeeds
                    toStore.forEach(function (channel) {
                        session.channels[channel] = true;
                    });
                    getHash(Env, safeKey, cb);
                });
            });
        });
    });
};
|||
|
|||
/* unpinChannel
   Remove a list of channels from the user's pins. Channels that are not
   currently pinned are skipped; if nothing remains, the current hash is
   returned immediately. Calls back with (error) or (undefined, hash). */
Pinning.unpinChannel = function (Env, safeKey, channels, cb) {
    // BUGFIX: the original guard (`!channels && channels.filter`) could never
    // reject anything and threw a TypeError on null/undefined input
    if (!Array.isArray(channels)) {
        // expected array
        return void cb('INVALID_PIN_LIST');
    }

    getChannelList(Env, safeKey, function (pinned) {
        var session = Core.getSession(Env.Sessions, safeKey);

        // only unpin channels which are pinned
        var toStore = channels.filter(function (channel) {
            return pinned.indexOf(channel) !== -1;
        });

        if (toStore.length === 0) {
            return void getHash(Env, safeKey, cb);
        }

        Env.pinStore.message(safeKey, JSON.stringify(['UNPIN', toStore, +new Date()]),
            function (e) {
            if (e) { return void cb(e); }
            // drop the unpinned channels from the in-memory cache
            toStore.forEach(function (channel) {
                delete session.channels[channel];
            });
            getHash(Env, safeKey, cb);
        });
    });
};
|||
|
|||
/* resetUserPins
   Replace the user's entire pin list with `channelList`, writing a
   single RESET line to their pin log and updating the in-memory cache.
   Calls back with (error) or (undefined, hash of the new pin list).
   NOTE: an empty channelList writes nothing and just returns the
   current hash. */
Pinning.resetUserPins = function (Env, safeKey, channelList, cb) {
    if (!Array.isArray(channelList)) { return void cb('INVALID_PIN_LIST'); }
    var session = Core.getSession(Env.Sessions, safeKey);

    // resetting to an empty list: nothing to write, just return the hash
    if (!channelList.length) {
        return void getHash(Env, safeKey, function (e, hash) {
            if (e) { return cb(e); }
            cb(void 0, hash);
        });
    }

    var pins = {};
    getMultipleFileSize(Env, channelList, function (e, sizes) {
        if (typeof(sizes) === 'undefined') { return void cb(e); }
        var pinSize = sumChannelSizes(sizes);

        getLimit(Env, safeKey, function (e, limit) {
            if (e) {
                Env.WARN('[RESET_ERR]', e);
                return void cb(e);
            }

            /* we want to let people pin, even if they are over their limit,
                but they should only be able to do this once.

                This prevents data loss in the case that someone registers, but
                does not have enough free space to pin their migrated data.

                They will not be able to pin additional pads until they upgrade
                or delete enough files to go back under their limit. */
            // NOTE(review): session.hasPinned is not set anywhere in this file —
            // confirm the "only once" rule is enforced elsewhere
            if (pinSize > limit[0] && session.hasPinned) { return void(cb('E_OVER_LIMIT')); }
            Env.pinStore.message(safeKey, JSON.stringify(['RESET', channelList, +new Date()]),
                function (e) {
                if (e) { return void cb(e); }
                channelList.forEach(function (channel) {
                    pins[channel] = true;
                });

                // update in-memory cache IFF the reset was allowed.
                // (a previously computed `oldChannels` list was dead code
                // and has been removed)
                session.channels = pins;
                getHash(Env, safeKey, function (e, hash) {
                    cb(e, hash);
                });
            });
        });
    });
};
|||
|
|||
// Delegate a single-channel size lookup to the storage layer.
Pinning.getFileSize = function (Env, channel, cb) {
    return void Env.getFileSize(channel, cb);
};
|||
|
|||
/* accepts a list, and returns a sublist of channel or file ids which seem
    to have been deleted from the server (file size 0)

    we might consider that we should only say a file is gone if fs.stat returns
    ENOENT, but for now it's simplest to just rely on getFileSize...
*/
Pinning.getDeletedPads = function (Env, channels, cb) {
    return void Env.getDeletedPads(channels, cb);
};
|||
|
|||
// FIXME this will be removed from the client
// legacy stub: unconditionally reports the channel as pinned
Pinning.isChannelPinned = function (Env, channel, cb) {
    return void cb(void 0, true);
};
|||
|
|||
@ -0,0 +1,107 @@ |
|||
/*jshint esversion: 6 */ |
|||
/* globals Buffer*/ |
|||
const Quota = module.exports; |
|||
|
|||
const Util = require("../common-util"); |
|||
const Package = require('../../package.json'); |
|||
const Https = require("https"); |
|||
|
|||
/* applyCustomLimits
   Merge admin-configured storage limits (from the server config) into
   Env.limits. Config keys may be URLs whose final path segment is the
   escaped public key; only that trailing segment is used. Entries
   lacking a numeric `limit` plus string `plan` and `note` are skipped. */
Quota.applyCustomLimits = function (Env) {
    // a well-formed limit entry
    var isLimit = function (o) {
        return Boolean(o && typeof(o) === 'object' &&
            typeof(o.limit) === 'number' &&
            typeof(o.plan) === 'string' &&
            typeof(o.note) === 'string');
    };

    // read custom limits from the Environment (taken from config)
    var custom = Env.customLimits || {};
    var customLimits = {};
    Object.keys(custom).forEach(function (k) {
        // extract and unescape the last path segment of the config key;
        // keys with no '/' segment are dropped
        k.replace(/\/([^\/]+)$/, function (all, safeKey) {
            var id = Util.unescapeKeyCharacters(safeKey || '');
            customLimits[id] = custom[k];
            return '';
        });
    });

    Object.keys(customLimits).forEach(function (k) {
        if (!isLimit(customLimits[k])) { return; }
        Env.limits[k] = customLimits[k];
    });
};
|||
|
|||
/* updateCachedLimits
   Refresh Env.limits. Config-file custom limits are always applied;
   additionally, unless the daily check is disabled, POST this instance's
   domain/version info to accounts.cryptpad.fr and replace Env.limits
   with the quota data it returns (custom limits are re-applied on top).
   Calls back with an error only when the remote check was attempted
   and failed on an instance that declares a domain. */
Quota.updateCachedLimits = function (Env, cb) {
    Quota.applyCustomLimits(Env);
    // skip the remote fetch when explicitly blocked, or (by default) when
    // the instance has no admin email and doesn't sell subscriptions
    if (Env.blockDailyCheck === true ||
        (typeof(Env.blockDailyCheck) === 'undefined' && Env.adminEmail === false && Env.allowSubscriptions === false)) {
        return void cb();
    }

    // identifying info sent to the accounts server
    var body = JSON.stringify({
        domain: Env.myDomain,
        subdomain: Env.mySubdomain || null,
        adminEmail: Env.adminEmail,
        version: Package.version
    });
    var options = {
        host: 'accounts.cryptpad.fr',
        path: '/api/getauthorized',
        method: 'POST',
        headers: {
            "Content-Type": "application/json",
            "Content-Length": Buffer.byteLength(body)
        }
    };

    var req = Https.request(options, function (response) {
        // any non-2xx status is treated as a failure
        if (!('' + response.statusCode).match(/^2\d\d$/)) {
            return void cb('SERVER ERROR ' + response.statusCode);
        }
        var str = '';

        response.on('data', function (chunk) {
            str += chunk;
        });

        response.on('end', function () {
            try {
                // the response body is the full limits map
                var json = JSON.parse(str);
                Env.limits = json;
                // config-file limits take precedence over remote ones
                Quota.applyCustomLimits(Env);
                cb(void 0);
            } catch (e) {
                cb(e);
            }
        });
    });

    req.on('error', function (e) {
        // network failure: fall back to config-file limits only
        Quota.applyCustomLimits(Env);
        if (!Env.myDomain) { return cb(); }
        // only return an error if your server allows subscriptions
        cb(e);
    });

    req.end(body);
};
|||
|
|||
// The limits object contains storage limits for all the publicKey that have paid
// To each key is associated an object containing the 'limit' value and a 'note' explaining that limit
Quota.getUpdatedLimit = function (Env, safeKey, cb) { // FIXME BATCH?S
    Quota.updateCachedLimits(Env, function (err) {
        if (err) { return void cb(err); }

        var limit = Env.limits[safeKey];

        // paid user: report their purchased limit and plan
        if (limit && typeof(limit.limit) === 'number') {
            return void cb(void 0, [limit.limit, limit.plan, limit.note]);
        }

        // otherwise fall back to the instance-wide default with no plan
        cb(void 0, [Env.defaultStorageLimit, '', '']);
    });
};
|||
|
|||
@ -0,0 +1,89 @@ |
|||
/*jshint esversion: 6 */ |
|||
const Upload = module.exports; |
|||
const Util = require("../common-util"); |
|||
const Pinning = require("./pin-rpc"); |
|||
const nThen = require("nthen"); |
|||
const Core = require("./core"); |
|||
|
|||
/* Upload.status
   Pre-flight check for an upload. Validates the proposed size against
   the instance limit (and, for premium plans, the premium limit), makes
   sure no other upload is in progress for this user, and confirms the
   user has quota to spare. On success, stages the pending upload size
   on the user's session and calls back with (undefined, false); calls
   back with (undefined, true) when an upload is already in progress. */
Upload.status = function (Env, safeKey, filesize, _cb) { // FIXME FILES
    var cb = Util.once(Util.mkAsync(_cb));

    // validate that the provided size is actually a non-negative number.
    // BUGFIX: the original condition used `&&`, so invalid sizes were never
    // rejected; `!(filesize >= 0)` also rejects NaN.
    if (typeof(filesize) !== 'number' ||
        !(filesize >= 0)) { return void cb('E_INVALID_SIZE'); }

    nThen(function (w) {
        // if the proposed upload size is within the regular limit
        // jump ahead to the next block
        if (filesize <= Env.maxUploadSize) { return; }

        // if larger uploads aren't explicitly enabled then reject them
        if (typeof(Env.premiumUploadSize) !== 'number') {
            w.abort();
            return void cb('TOO_LARGE');
        }

        // otherwise go and retrieve info about the user's quota
        Pinning.getLimit(Env, safeKey, w(function (err, limit) {
            if (err) {
                w.abort();
                return void cb("E_BAD_LIMIT");
            }

            var plan = limit[1];

            // see if they have a special plan, reject them if not
            if (plan === '') {
                w.abort();
                return void cb('TOO_LARGE');
            }

            // and that they're not over the greater limit
            if (filesize >= Env.premiumUploadSize) {
                w.abort();
                return void cb("TOO_LARGE");
            }

            // fallthrough will proceed to the next block
        }));
    }).nThen(function (w) {
        var abortAndCB = Util.both(w.abort, cb);
        Env.blobStore.status(safeKey, w(function (err, inProgress) {
            // if there's an error something is weird
            if (err) { return void abortAndCB(err); }

            // we cannot upload two things at once
            if (inProgress) { return void abortAndCB(void 0, true); }
        }));
    }).nThen(function () {
        // if you're here then there are no pending uploads
        // check if you have space in your quota to upload something of this size
        Pinning.getFreeSpace(Env, safeKey, function (e, free) {
            if (e) { return void cb(e); }
            if (filesize >= free) { return cb('NOT_ENOUGH_SPACE'); }

            // stage the upload on the session so subsequent chunks can be tracked
            var user = Core.getSession(Env.Sessions, safeKey);
            user.pendingUploadSize = filesize;
            user.currentUploadSize = 0;

            cb(void 0, false);
        });
    });
};
|||
|
|||
// The remaining upload RPCs delegate directly to the blob storage layer,
// which tracks per-user (safeKey) staging state.

// append a chunk to the user's staged upload
Upload.upload = function (Env, safeKey, chunk, cb) {
    return void Env.blobStore.upload(safeKey, chunk, cb);
};

// finalize a staged upload as an unowned blob
Upload.complete = function (Env, safeKey, arg, cb) {
    return void Env.blobStore.complete(safeKey, arg, cb);
};

// abandon a staged upload
Upload.cancel = function (Env, safeKey, arg, cb) {
    return void Env.blobStore.cancel(safeKey, arg, cb);
};

// finalize a staged upload as a blob owned by the uploader
Upload.complete_owned = function (Env, safeKey, arg, cb) {
    return void Env.blobStore.completeOwned(safeKey, arg, cb);
};
|||
|
|||
@ -1,11 +0,0 @@ |
|||
// remove duplicate elements in an array
|
|||
module.exports = function (O) { |
|||
// make a copy of the original array
|
|||
var A = O.slice(); |
|||
for (var i = 0; i < A.length; i++) { |
|||
for (var j = i + 1; j < A.length; j++) { |
|||
if (A[i] === A[j]) { A.splice(j--, 1); } |
|||
} |
|||
} |
|||
return A; |
|||
}; |
|||
@ -0,0 +1,86 @@ |
|||
var Default = module.exports; |
|||
|
|||
/* commonCSP
   Build the list of Content-Security-Policy directives shared by every
   page. Callers append a page-appropriate script-src directive and join
   the result with '; ' to form the header value. */
Default.commonCSP = function (domain) {
    // prefix with a space so each directive can simply concatenate the domain
    domain = ' ' + domain;
    // Content-Security-Policy

    return [
        "default-src 'none'",
        "style-src 'unsafe-inline' 'self' " + domain,
        "font-src 'self' data:" + domain,

        /* child-src is used to restrict iframes to a set of allowed domains.
         * connect-src is used to restrict what domains can connect to the websocket.
         *
         * it is recommended that you configure these fields to match the
         * domain which will serve your CryptPad instance.
         */
        "child-src blob: *",
        // IE/Edge
        "frame-src blob: *",

        /* this allows connections over secure or insecure websockets
           if you are deploying to production, you'll probably want to remove
           the ws://* directive, and change '*' to your domain
        */
        "connect-src 'self' ws: wss: blob:" + domain,

        // data: is used by codemirror
        "img-src 'self' data: blob:" + domain,
        "media-src * blob:",

        // for accounts.cryptpad.fr authentication and cross-domain iframe sandbox
        "frame-ancestors *",
        // trailing empty entry: join('; ') then yields a trailing '; ',
        // which separates the appended script-src directive
        ""
    ];
};
|||
|
|||
// CSP header value for static pages: the common directives plus a strict
// script-src (no eval, no inline scripts).
Default.contentSecurity = function (domain) {
    var directives = Default.commonCSP(domain).join('; ');
    var header = directives + "script-src 'self' resource: " + domain;
    // collapse runs of whitespace into single spaces
    return header.replace(/\s+/g, ' ');
};
|||
|
|||
// CSP header value for pad (editor) pages: like contentSecurity, but the
// editors require 'unsafe-eval' and 'unsafe-inline' in script-src.
Default.padContentSecurity = function (domain) {
    var directives = Default.commonCSP(domain).join('; ');
    var header = directives + "script-src 'self' 'unsafe-eval' 'unsafe-inline' resource: " + domain;
    return header.replace(/\s+/g, ' ');
};
|||
|
|||
// Security-related HTTP headers applied to every response.
Default.httpHeaders = function () {
    var headers = {
        "X-XSS-Protection": "1; mode=block",
        "X-Content-Type-Options": "nosniff",
        "Access-Control-Allow-Origin": "*"
    };
    return headers;
};
|||
|
|||
// Slugs of the static informational pages the server should serve.
Default.mainPages = function () {
    var pages = [
        'index',
        'privacy',
        'terms',
        'about',
        'contact',
        'what-is-cryptpad',
        'features',
        'faq',
        'maintenance'
    ];
    return pages;
};
|||
|
|||
/* By default the CryptPad server will run scheduled tasks every five minutes |
|||
* If you want to run scheduled tasks in a separate process (like a crontab) |
|||
* you can disable this behaviour by setting the following value to true |
|||
*/ |
|||
//disableIntegratedTasks: false,
|
|||
|
|||
/* CryptPad's file storage adaptor closes unused files after a configurable |
|||
* number of milliseconds (default 30000 (30 seconds)) |
|||
*/ |
|||
// channelExpirationMs: 30000,
|
|||
|
|||
/* CryptPad's file storage adaptor is limited by the number of open files. |
|||
* When the adaptor reaches openFileLimit, it will clean up older files |
|||
*/ |
|||
//openFileLimit: 2048,
|
|||
|
|||
|
|||
|
|||
|
|||
@ -0,0 +1,285 @@ |
|||
/* jshint esversion: 6 */ |
|||
|
|||
const nThen = require('nthen'); |
|||
const Crypto = require('crypto'); |
|||
const WriteQueue = require("./write-queue"); |
|||
const BatchRead = require("./batch-read"); |
|||
const RPC = require("./rpc"); |
|||
const HK = require("./hk-util.js"); |
|||
const Core = require("./commands/core"); |
|||
|
|||
const Store = require("./storage/file"); |
|||
const BlobStore = require("./storage/blob"); |
|||
const Workers = require("./workers/index"); |
|||
|
|||
/* create
 * Initializes the history keeper: builds the shared Env, opens the pin,
 * channel, and blob stores, spawns worker processes, schedules the task
 * runner, and creates the RPC module.
 * `config` is the server's loaded configuration; `cb` is called with
 * (undefined, historyKeeper) once everything is wired up. Storage errors
 * during startup are fatal (thrown).
 */
module.exports.create = function (config, cb) {
    const Log = config.log;

    // Warn with the command output and a synthesized stack trace,
    // but only when both an error and its output are present.
    var WARN = function (e, output) {
        if (e && output) {
            Log.warn(e, {
                output: output,
                message: String(e),
                stack: new Error(e).stack,
            });
        }
    };

    Log.silly('HK_LOADING', 'LOADING HISTORY_KEEPER MODULE');

    // TODO populate Env with everything that you use from config
    // so that you can stop passing around your raw config
    // and more easily share state between historyKeeper and rpc
    const Env = {
        Log: Log,
        // store
        id: Crypto.randomBytes(8).toString('hex'),

        metadata_cache: {},
        channel_cache: {},
        queueStorage: WriteQueue(),
        queueDeletes: WriteQueue(),

        batchIndexReads: BatchRead("HK_GET_INDEX"),
        batchMetadata: BatchRead('GET_METADATA'),
        batchRegisteredUsers: BatchRead("GET_REGISTERED_USERS"),
        batchDiskUsage: BatchRead('GET_DISK_USAGE'),
        batchUserPins: BatchRead('LOAD_USER_PINS'),
        batchTotalSize: BatchRead('GET_TOTAL_SIZE'),

        //historyKeeper: config.historyKeeper,
        intervals: config.intervals || {},
        maxUploadSize: config.maxUploadSize || (20 * 1024 * 1024),
        premiumUploadSize: false, // overridden below...
        Sessions: {},
        paths: {},
        //msgStore: config.store,

        netfluxUsers: {},

        pinStore: undefined,
        pinnedPads: {},
        pinsLoaded: false,
        pendingPinInquiries: {},
        pendingUnpins: {},
        pinWorkers: 5,

        limits: {},
        admins: [],
        WARN: WARN,
        flushCache: config.flushCache,
        adminEmail: config.adminEmail,
        allowSubscriptions: config.allowSubscriptions === true,
        blockDailyCheck: config.blockDailyCheck === true,

        myDomain: config.myDomain,
        mySubdomain: config.mySubdomain, // only exists for the accounts integration
        customLimits: config.customLimits || {},
        // FIXME this attribute isn't in the default conf
        // but it is referenced in Quota
        domain: config.domain
    };

    // a premium upload size only takes effect if it is a number at least
    // as large as the regular maximum upload size
    (function () {
        var pes = config.premiumUploadSize;
        if (!isNaN(pes) && pes >= Env.maxUploadSize) {
            Env.premiumUploadSize = pes;
        }
    }());

    var paths = Env.paths;

    var keyOrDefaultString = function (key, def) {
        return typeof(config[key]) === 'string'? config[key]: def;
    };

    var pinPath = paths.pin = keyOrDefaultString('pinPath', './pins');
    paths.block = keyOrDefaultString('blockPath', './block');
    paths.data = keyOrDefaultString('filePath', './datastore');
    paths.staging = keyOrDefaultString('blobStagingPath', './blobstage');
    paths.blob = keyOrDefaultString('blobPath', './blob');

    Env.defaultStorageLimit = typeof(config.defaultStorageLimit) === 'number' && config.defaultStorageLimit >= 0?
        config.defaultStorageLimit:
        Core.DEFAULT_LIMIT;

    // admin entries may be full URLs; keep only the trailing path segment
    // (the public key), dropping any trailing slashes first
    try {
        Env.admins = (config.adminKeys || []).map(function (k) {
            k = k.replace(/\/+$/, '');
            var s = k.split('/');
            return s[s.length-1];
        });
    } catch (e) {
        console.error("Can't parse admin keys. Please update or fix your config.js file!");
    }

    config.historyKeeper = Env.historyKeeper = {
        metadata_cache: Env.metadata_cache,
        channel_cache: Env.channel_cache,

        id: Env.id,

        channelMessage: function (Server, channel, msgStruct) {
            // netflux-server emits 'channelMessage' events whenever someone broadcasts to a channel
            // historyKeeper stores these messages if the channel id indicates that they are
            // a channel type with permanent history
            HK.onChannelMessage(Env, Server, channel, msgStruct);
        },
        channelClose: function (channelName) {
            // netflux-server emits 'channelClose' events whenever everyone leaves a channel
            // we drop cached metadata and indexes at the same time
            HK.dropChannel(Env, channelName);
        },
        channelOpen: function (Server, channelName, userId, wait) {
            Env.channel_cache[channelName] = Env.channel_cache[channelName] || {};

            var sendHKJoinMessage = function () {
                Server.send(userId, [
                    0,
                    Env.id,
                    'JOIN',
                    channelName
                ]);
            };

            // a little backwards compatibility in case you don't have the latest server
            // allow lists won't work unless you update, though
            if (typeof(wait) !== 'function') { return void sendHKJoinMessage(); }

            var next = wait();
            var cb = function (err, info) {
                next(err, info, sendHKJoinMessage);
            };

            // only conventional channels can be restricted
            if ((channelName || "").length !== HK.STANDARD_CHANNEL_LENGTH) {
                return void cb();
            }

            // gets and caches the metadata...
            HK.getMetadata(Env, channelName, function (err, metadata) {
                if (err) {
                    Log.error('HK_METADATA_ERR', {
                        channel: channelName,
                        error: err,
                    });
                }
                if (!metadata || (metadata && !metadata.restricted)) {
                    // the channel doesn't have metadata, or it does and it's not restricted
                    // either way, let them join.
                    return void cb();
                }

                // this channel is restricted. verify that the user in question is in the allow list

                // construct a definitive list (owners + allowed)
                var allowed = HK.listAllowedUsers(metadata);
                // and get the list of keys for which this user has already authenticated
                var session = HK.getNetfluxSession(Env, userId);

                if (HK.isUserSessionAllowed(allowed, session)) {
                    return void cb();
                }

                // otherwise they're not allowed.
                // respond with a special error that includes the list of keys
                // which would be allowed...
                // FIXME RESTRICT bonus points if you hash the keys to limit data exposure
                cb("ERESTRICTED", allowed);
            });
        },
        sessionClose: function (userId, reason) {
            HK.closeNetfluxSession(Env, userId);
            if (['BAD_MESSAGE', 'SEND_MESSAGE_FAIL_2'].indexOf(reason) !== -1) {
                // NOTE(review): 'reason' is a string in this branch, so this
                // '.code' check looks unreachable — confirm whether 'reason'
                // can ever be an Error object here
                if (reason && reason.code === 'ECONNRESET') { return; }
                return void Log.error('SESSION_CLOSE_WITH_ERROR', {
                    userId: userId,
                    reason: reason,
                });
            }

            // BUGFIX: the original tested the raw indexOf() result, which is
            // -1 (truthy) for every reason NOT in the list and 0 (falsy) for
            // 'SOCKET_CLOSED' — the opposite of the intent. Compare with -1.
            if (['SOCKET_CLOSED', 'SOCKET_ERROR'].indexOf(reason) !== -1) { return; }
            Log.verbose('SESSION_CLOSE_ROUTINE', {
                userId: userId,
                reason: reason,
            });
        },
        directMessage: function (Server, seq, userId, json) {
            // netflux-server allows you to register an id with a handler
            // this handler is invoked every time someone sends a message to that id
            HK.onDirectMessage(Env, Server, seq, userId, json);
        },
    };

    Log.verbose('HK_ID', 'History keeper ID: ' + Env.id);

    nThen(function (w) {
        // create a pin store
        Store.create({
            filePath: pinPath,
        }, w(function (err, s) {
            if (err) { throw err; }
            Env.pinStore = s;
        }));

        // create a channel store
        Store.create(config, w(function (err, _store) {
            if (err) { throw err; }
            config.store = _store;
            Env.msgStore = _store; // API used by rpc
            Env.store = _store; // API used by historyKeeper
        }));

        // create a blob store
        BlobStore.create({
            blobPath: config.blobPath,
            blobStagingPath: config.blobStagingPath,
            archivePath: config.archivePath,
            getSession: function (safeKey) {
                return Core.getSession(Env.Sessions, safeKey);
            },
        }, w(function (err, blob) {
            if (err) { throw new Error(err); }
            Env.blobStore = blob;
        }));
    }).nThen(function (w) {
        // spawn worker processes for expensive computations
        Workers.initialize(Env, {
            blobPath: config.blobPath,
            blobStagingPath: config.blobStagingPath,
            taskPath: config.taskPath,
            pinPath: pinPath,
            filePath: config.filePath,
            archivePath: config.archivePath,
            channelExpirationMs: config.channelExpirationMs,
            verbose: config.verbose,
            openFileLimit: config.openFileLimit,

            maxWorkers: config.maxWorkers,
        }, w(function (err) {
            if (err) {
                throw new Error(err);
            }
        }));
    }).nThen(function () {
        // periodically run scheduled tasks (e.g. channel expiration)
        // unless the admin has disabled the integrated task runner
        if (config.disableIntegratedTasks) { return; }
        config.intervals = config.intervals || {};

        var tasks_running;
        config.intervals.taskExpiration = setInterval(function () {
            // don't start a new run while the previous one is in flight
            if (tasks_running) { return; }
            tasks_running = true;
            Env.runTasks(function (err) {
                if (err) {
                    Log.error('TASK_RUNNER_ERR', err);
                }
                tasks_running = false;
            });
        }, 1000 * 60 * 5); // run every five minutes
    }).nThen(function () {
        RPC.create(Env, function (err, _rpc) {
            if (err) { throw err; }

            Env.rpc = _rpc;
            cb(void 0, config.historyKeeper);
        });
    });
};
|||
@ -0,0 +1,910 @@ |
|||
/* jshint esversion: 6 */ |
|||
/* global Buffer */ |
|||
var HK = module.exports; |
|||
|
|||
const nThen = require('nthen'); |
|||
const Util = require("./common-util"); |
|||
const MetaRPC = require("./commands/metadata"); |
|||
const Nacl = require('tweetnacl/nacl-fast'); |
|||
const now = function () { return (new Date()).getTime(); }; |
|||
const ONE_DAY = 1000 * 60 * 60 * 24; // one day in milliseconds
|
|||
|
|||
/* getHash
 * slices off the leading 64 characters of a message, which are treated
 * as a (most likely unique) identifier for that message.
 * These "hashes" identify particular messages in a channel's history and
 * are stored clientside (in memory or in the user's drive) to query for
 * new messages:
 *   * when reconnecting to a pad
 *   * when connecting to chat or a mailbox
 * Because clients persist these values in encrypted data that can't be
 * easily migrated, this function must never change. Don't break it!
 *
 * Non-string input is logged (when a logger is provided) and yields ''.
 */
const getHash = HK.getHash = function (msg, Log) {
    if (typeof(msg) === 'string') { return msg.slice(0, 64); }
    if (Log) {
        Log.warn('HK_GET_HASH', 'getHash() called on ' + typeof(msg) + ': ' + msg);
    }
    return '';
};
|||
|
|||
// historyKeeper should explicitly store any channel
// with a 32 character id (a "conventional" pad channel)
const STANDARD_CHANNEL_LENGTH = HK.STANDARD_CHANNEL_LENGTH = 32;

// historyKeeper should not store messages sent to any channel
// with a 34 character id (ephemeral channels, e.g. cursor/presence)
const EPHEMERAL_CHANNEL_LENGTH = HK.EPHEMERAL_CHANNEL_LENGTH = 34;
|||
|
|||
/* tryParse
 * JSON.parse wrapped in a try/catch: returns the parsed value on success;
 * on invalid input it logs HK_PARSE_ERROR and implicitly returns undefined.
 */
const tryParse = HK.tryParse = function (Env, str) {
    try {
        return JSON.parse(str);
    } catch (err) {
        // record the error's name and the offending input for diagnosis
        Env.Log.error('HK_PARSE_ERROR', {
            message: err && err.name,
            input: str,
        });
    }
};
|||
|
|||
/* sliceCpIndex
 * returns the checkpoints which might be relevant for a client connecting
 * to a session:
 *   * if there are two or fewer checkpoints, return everything
 *   * otherwise return the last two, plus any earlier ones that were
 *     received within the last 100 messages (cp.line > line - 100)
 * The extra history is what prevents clients from forking on checkpoints
 * and dropping forked history.
 */
const sliceCpIndex = HK.sliceCpIndex = function (cpIndex, line) {
    // any checkpoint older than 100 messages ago is "old"
    const minLine = Math.max(0, line - 100);
    // always keep the final two checkpoints
    const tail = cpIndex.slice(-2);
    // of the rest, keep only those recent enough to matter
    const recent = cpIndex.slice(0, -2).filter(function (cp) {
        return cp.line > minLine;
    });
    return recent.concat(tail);
};
|||
|
|||
// a parsed message is a metadata message iff it carries a 'channel' field
const isMetadataMessage = HK.isMetadataMessage = function (parsed) {
    return Boolean(parsed && parsed.channel);
};

// the definitive list of users permitted on a restricted channel:
// its owners plus its explicit allow list
HK.listAllowedUsers = function (metadata) {
    const owners = metadata.owners || [];
    const allowed = metadata.allowed || [];
    return owners.concat(allowed);
};

// look up the map of keys this netflux user has authenticated with
HK.getNetfluxSession = function (Env, netfluxId) {
    return Env.netfluxUsers[netfluxId];
};

// true iff any key in the user's session appears in the allow list
HK.isUserSessionAllowed = function (allowed, session) {
    if (!session) { return false; }
    for (var unsafeKey in session) {
        if (allowed.indexOf(unsafeKey) !== -1) { return true; }
    }
    return false;
};

// record that this netflux user has proven ownership of 'unsafeKey',
// stamping the authentication time
HK.authenticateNetfluxSession = function (Env, netfluxId, unsafeKey) {
    const user = Env.netfluxUsers[netfluxId] = Env.netfluxUsers[netfluxId] || {};
    user[unsafeKey] = +new Date();
};

// forget everything this netflux user authenticated with
HK.closeNetfluxSession = function (Env, netfluxId) {
    delete Env.netfluxUsers[netfluxId];
};
|||
|
|||
// validateKeyStrings supplied by clients must be strings which decode
// (base64) to Uint8Arrays of exactly the signing public-key length
const isValidValidateKeyString = function (key) {
    if (typeof(key) !== 'string') { return false; }
    try {
        return Nacl.util.decodeBase64(key).length === Nacl.sign.publicKeyLength;
    } catch (e) {
        // decodeBase64 throws on malformed input
        return false;
    }
};
|||
|
|||
// matches the 'cp|' prefix of checkpoint messages, optionally followed
// by a base64 segment and a second '|'
var CHECKPOINT_PATTERN = /^cp\|(([A-Za-z0-9+\/=]+)\|)?/;
|||
|
|||
/* expireChannel
 * archives a channel that should already have been removed but is somehow
 * still present, logging the outcome of the archival either way.
 */
const expireChannel = function (Env, channel) {
    return void Env.store.archiveChannel(channel, function (err) {
        const status = err ? String(err) : "SUCCESS";
        Env.Log.info("ARCHIVAL_CHANNEL_BY_HISTORY_KEEPER_EXPIRATION", {
            channelId: channel,
            status: status,
        });
    });
};
|||
|
|||
/* dropChannel
 * cleans up memory structures which are managed entirely by the historyKeeper:
 * the cached metadata and the cached channel index for the given channel.
 */
const dropChannel = HK.dropChannel = function (Env, chanName) {
    delete Env.metadata_cache[chanName];
    delete Env.channel_cache[chanName];
};
|||
|
|||
/* checkExpired
 * synchronously returns true or false to indicate whether the channel is
 * expired according to its cached metadata.
 * It has side effects when the channel has expired:
 *   * archives the channel if it is more than a day overdue
 *   * closes the channel via the store.closeChannel API and then broadcasts
 *     EEXPIRED to all channel members
 *   * removes the channel's metadata and index from historyKeeper's caches
 *
 * FIXME the boolean nature of this API should be separated from its side effects
 */
const checkExpired = function (Env, Server, channel) {
    // only conventional channels carry metadata that can expire
    if (!channel || channel.length !== STANDARD_CHANNEL_LENGTH) { return false; }

    const metadata = Env.metadata_cache[channel];
    if (!metadata || typeof(metadata.expire) !== 'number') { return false; }

    // the number of milliseconds ago the channel should have expired
    const pastDue = (+new Date()) - metadata.expire;

    // less than zero means that it hasn't expired yet
    if (pastDue < 0) { return false; }

    // if it should have expired more than a day ago there may have been a
    // problem with scheduling tasks, or the scheduled tasks may not be
    // running, so trigger a removal from here
    if (pastDue >= ONE_DAY) { expireChannel(Env, channel); }

    // close the channel, notify everyone in it, and drop our caches
    Env.store.closeChannel(channel, function () {
        Server.channelBroadcast(channel, {
            error: 'EEXPIRED',
            channel: channel
        }, Env.id);
        dropChannel(Env, channel);
    });

    // true indicates that the channel has expired
    return true;
};
|||
|
|||
/* getMetadata
 * calls back with the channel's metadata object, serving it from the
 * in-memory cache when possible and otherwise reading it via MetaRPC.
 * Invalid or absent metadata (no channel field of the standard length)
 * results in a callback with no arguments. The callback is made async
 * and guarded against double invocation.
 */
const getMetadata = HK.getMetadata = function (Env, channelName, _cb) {
    const cb = Util.once(Util.mkAsync(_cb));

    // fast path: serve from the cache
    const cached = Env.metadata_cache[channelName];
    if (cached && typeof(cached) === 'object') {
        return void cb(undefined, cached);
    }

    MetaRPC.getMetadataRaw(Env, channelName, function (err, metadata) {
        if (err) {
            console.error(err);
            return void cb(err);
        }
        // reject anything without a conventional channel id
        const valid = metadata &&
            typeof(metadata.channel) === 'string' &&
            metadata.channel.length === STANDARD_CHANNEL_LENGTH;
        if (!valid) { return cb(); }

        // cache it for subsequent lookups
        Env.metadata_cache[channelName] = metadata;
        cb(undefined, metadata);
    });
};
|||
|
|||
/* getIndex
 * calls back with an error if anything goes wrong, or with a cached index
 * for the channel if one exists; otherwise it calls back with the index
 * computed by 'Env.computeIndex' (batching concurrent requests for the
 * same channel).
 * As a bonus: if the channel is open but its index isn't cached yet,
 * the computed index is cached on the channel.
 */
const getIndex = (Env, channelName, cb) => {
    const cached = Env.channel_cache[channelName];

    // if the channel is in memory and its index is cached, return it
    // (asynchronously, to keep callers' control flow consistent)
    if (cached && cached.index) {
        return void Util.mkAsync(cb)(undefined, cached.index);
    }

    Env.batchIndexReads(channelName, cb, function (done) {
        Env.computeIndex(Env, channelName, (err, index) => {
            // this is most likely an unrecoverable filesystem error
            if (err) { return void done(err); }

            // cache the computed result if the channel is still open
            if (cached) { cached.index = index; }
            done(void 0, index);
        });
    });
};
|||
|
|||
/* checkOffsetMap

    Sorry for the weird function --ansuz

    Almost equivalent to `Object.keys(map).length`, but cheaper: it never
    allocates the temporary key array. Beyond that, it returns the count
    negated if the map's values are not in ascending (insertion) order.
    Ordering matters because trimOffsetByOrder deletes members by iterating
    in insertion order.
*/
var checkOffsetMap = function (map) {
    var count = 0;
    var outOfOrder = false;
    var previous = 0;
    for (let key in map) {
        count++;
        var current = map[key];
        if (!outOfOrder && previous > current) { outOfOrder = true; }
        previous = current;
    }
    return outOfOrder ? -count : count;
};
|||
|
|||
/* trimOffsetByOrder
 * Pass the map and the number of elements it contains.
 * Deletes the oldest (first-inserted) entries until only 50 remain,
 * relying on the map's insertion order.
 */
var trimOffsetByOrder = function (map, n) {
    var excess = Math.max(n - 50, 0);
    var removed = 0;
    for (let key in map) {
        if (removed >= excess) { return; }
        removed++;
        delete map[key];
    }
};
|||
|
|||
/* trimMapByOffset
 * Remove from the map any byte offsets which are below the lowest offset
 * you'd like to preserve (probably that of the oldest checkpoint).
 * A falsy offset is a no-op.
 */
var trimMapByOffset = function (map, offset) {
    if (!offset) { return; }
    for (let key in map) {
        var value = map[key];
        if (value < offset) { delete map[key]; }
    }
};
|||
|
|||
/* storeMessage
 * takes:
 *   * the channel object (its .id is the channel id)
 *   * the message to store
 *   * whether the message is a checkpoint
 *   * optionally the hash of the message (not always used, so we guard)
 *
 * Appends the message to the channel's log via the per-channel write
 * queue, then updates the cached index (line count, checkpoint index,
 * hash->offset map, total size). Async but without a callback; the queue
 * guarantees that offset computation is atomic with each write.
 */
const storeMessage = function (Env, channel, msg, isCp, optionalMessageHash) {
    const id = channel.id;
    const Log = Env.Log;

    Env.queueStorage(id, function (next) {
        const msgBin = Buffer.from(msg + '\n', 'utf8');
        // Store the message first, and update the index only once it's stored.
        // store.messageBin can be async so updating the index first may
        // result in a wrong cpIndex
        nThen((waitFor) => {
            Env.store.messageBin(id, msgBin, waitFor(function (err) {
                if (err) {
                    waitFor.abort();
                    Log.error("HK_STORE_MESSAGE_ERROR", err.message);

                    // this error is critical, but there's not much we can do at the moment
                    // proceed with more messages, but they'll probably fail too
                    // at least you won't have a memory leak

                    // TODO make it possible to respond to clients with errors so they know
                    // their message wasn't stored
                    return void next();
                }
            }));
        }).nThen((waitFor) => {
            getIndex(Env, id, waitFor((err, index) => {
                if (err) {
                    Log.warn("HK_STORE_MESSAGE_INDEX", err.stack);
                    // non-critical, we'll be able to get the channel index later
                    return void next();
                }
                if (typeof (index.line) === "number") { index.line++; }
                if (isCp) {
                    index.cpIndex = sliceCpIndex(index.cpIndex, index.line || 0);
                    // BUGFIX: cpIndex entries are {offset, line} records, but
                    // trimMapByOffset compares numbers; passing the record made
                    // every comparison false and the trim a no-op. Pass the
                    // numeric offset (guarding against an empty cpIndex).
                    trimMapByOffset(index.offsetByHash, index.cpIndex[0] && index.cpIndex[0].offset);
                    index.cpIndex.push({
                        offset: index.size,
                        line: ((index.line || 0) + 1)
                    });
                }
                if (optionalMessageHash) {
                    index.offsetByHash[optionalMessageHash] = index.size;
                    index.offsets++;
                }
                if (index.offsets >= 100 && !index.cpIndex.length) {
                    let offsetCount = checkOffsetMap(index.offsetByHash);
                    if (offsetCount < 0) {
                        Log.warn('OFFSET_TRIM_OOO', {
                            channel: id,
                            // BUGFIX: was 'index.OffsetByHash' (capital O),
                            // which is always undefined
                            map: index.offsetByHash
                        });
                    } else if (offsetCount > 0) {
                        trimOffsetByOrder(index.offsetByHash, index.offsets);
                        index.offsets = checkOffsetMap(index.offsetByHash);
                    }
                }
                index.size += msgBin.length;

                // handle the next element in the queue
                next();
            }));
        });
    });
};
|||
|
|||
|
|||
/* getHistoryOffset
    returns a number representing the byte offset from the start of the log
    for whatever history you're seeking.

    query by providing a 'lastKnownHash',
    which is really just a string of the first 64 characters of an encrypted message.
    OR by -1 which indicates that we want the full history (byte offset 0)
    OR nothing, which indicates that you want whatever messages the historyKeeper deems relevant
    (typically the last few checkpoints)

    this function embeds a lot of the history keeper's logic:

    0. if you passed -1 as the lastKnownHash it means you want the complete history
      * I'm not sure why you'd need to call this function if you know it will return 0 in this case...
      * it has a side-effect of filling the index cache if it's empty
    1. if you provided a lastKnownHash and that message does not exist in the history:
      * either the client has made a mistake or the history they knew about no longer exists
      * call back with EUNKNOWN
    2. if you did not provide a lastKnownHash
      * and there are fewer than two checkpoints:
        * return 0 (read from the start of the file)
      * and there are two or more checkpoints:
        * return the offset of the earliest checkpoint which 'sliceCpIndex' considers relevant
    3. if you did provide a lastKnownHash
      * read through the log until you find the hash that you're looking for
      * call back with either the byte offset of the message that you found OR
      * -1 if you didn't find it
*/
const getHistoryOffset = (Env, channelName, lastKnownHash, _cb) => {
    // guard against double invocation and force async behaviour
    const cb = Util.once(Util.mkAsync(_cb));

    // lastKnownhash === -1 means we want the complete history
    if (lastKnownHash === -1) { return void cb(null, 0); }
    let offset = -1;
    nThen((waitFor) => {
        getIndex(Env, channelName, waitFor((err, index) => {
            if (err) { waitFor.abort(); return void cb(err); }

            // check if the "hash" the client is requesting exists in the index
            const lkh = index.offsetByHash[lastKnownHash];

            // fall through to the next block if the offset of the hash in question is not in memory
            if (lastKnownHash && typeof(lkh) !== "number") { return; }

            // no lastKnownHash: serve the recent history since the last 2 checkpoints
            if (!lastKnownHash) {
                waitFor.abort();
                // Less than 2 checkpoints in the history: return everything
                if (index.cpIndex.length < 2) { return void cb(null, 0); }
                // Otherwise return the offset of the earliest checkpoint
                // that sliceCpIndex kept
                return void cb(null, index.cpIndex[0].offset);
                /* LATER...
                    in practice, two checkpoints can be very close together
                    we have measures to avoid duplicate checkpoints, but editors
                    can produce nearby checkpoints which are slightly different,
                    and slip past these protections. To be really careful, we can
                    seek past nearby checkpoints by some number of patches so as
                    to ensure that all editors have sufficient knowledge of history
                    to reconcile their differences. */
            }

            offset = lkh;
        }));
    }).nThen((w) => {
        // skip past this block if the offset is anything other than -1
        // this basically makes these first two nThen blocks behave like if-else
        if (offset !== -1) { return; }

        // either the message exists in history but is not in the cached index
        // or it does not exist at all. In either case 'getHashOffset' is expected
        // to return a number: -1 if not present, positive interger otherwise
        Env.getHashOffset(channelName, lastKnownHash, w(function (err, _offset) {
            if (err) {
                w.abort();
                return void cb(err);
            }
            offset = _offset;
        }));
    }).nThen(() => {
        cb(null, offset);
    });
};
|||
|
|||
/* getHistoryAsync
 * finds the appropriate byte offset from which to begin reading using 'getHistoryOffset'
 * streams through the rest of the messages, safely parsing them and returning the parsed content to the handler
 * calls back when it has reached the end of the log

    Used by:
    * GET_HISTORY
*/
const getHistoryAsync = (Env, channelName, lastKnownHash, beforeHash, handler, cb) => {
    const store = Env.store;

    let offset = -1;
    nThen((waitFor) => {
        // resolve the lastKnownHash to a byte offset within the channel's log
        getHistoryOffset(Env, channelName, lastKnownHash, waitFor((err, os) => {
            if (err) {
                waitFor.abort();
                return void cb(err);
            }
            offset = os;
        }));
    }).nThen((waitFor) => {
        // -1 means the requested hash could not be located anywhere in the log
        if (offset === -1) {
            return void cb(new Error('EUNKNOWN'));
        }
        // with 'beforeHash' we read from the beginning of the log and stop
        // at the requested hash's offset; otherwise we start at that offset
        const start = (beforeHash) ? 0 : offset;
        store.readMessagesBin(channelName, start, (msgObj, readMore, abort) => {
            if (beforeHash && msgObj.offset >= offset) { return void abort(); }
            // skip lines that fail to parse (tryParse logs them)
            var parsed = tryParse(Env, msgObj.buff.toString('utf8'));
            if (!parsed) { return void readMore(); }
            handler(parsed, readMore);
        }, waitFor(function (err) {
            return void cb(err);
        }));
    });
};
|||
|
|||
/* handleRPC
 * ACKs the request, then forwards the remainder of the parsed message to
 * the RPC module. The RPC's response (or error) is relayed back to the
 * user as a direct message tagged with the client's transaction id
 * (parsed[0]). Anything thrown mid-call yields a generic SERVER_ERROR.
 */
const handleRPC = function (Env, Server, seq, userId, parsed) {
    const hkId = Env.id;
    const txid = parsed[0];
    // slice off the sequence number; the rest is the actual RPC call
    const call = parsed.slice(1);

    const respond = function (payload) {
        Server.send(userId, [0, hkId, 'MSG', userId, JSON.stringify(payload)]);
    };

    Server.send(userId, [seq, 'ACK']);
    try {
        Env.rpc(Server, userId, call, function (err, output) {
            if (err) { return void respond([txid, 'ERROR', err]); }
            respond([txid].concat(output));
        });
    } catch (e) {
        // if anything throws in the middle, send a generic error
        respond([txid, 'ERROR', 'SERVER_ERROR']);
    }
};
|||
|
|||
/* handleFirstMessage
 * called when a user connects to a channel that doesn't exist yet:
 * initializes the channel by writing the user-supplied metadata to its log,
 * and, if that metadata carries a numeric 'expire' timestamp, schedules an
 * EXPIRE task for it.
 */
const handleFirstMessage = function (Env, channelName, metadata) {
    Env.store.writeMetadata(channelName, JSON.stringify(metadata), function (err) {
        if (!err) { return; }
        // FIXME tell the user that there was a channel error?
        Env.Log.error('HK_WRITE_METADATA', {
            channel: channelName,
            error: err,
        });
    });

    // the user has said they want this pad to expire at some point:
    // schedule the expiration task
    if (metadata.expire && typeof(metadata.expire) === 'number') {
        Env.writeTask(metadata.expire, "EXPIRE", [ channelName ], function (err) {
            if (!err) { return; }
            // if there is an error, we don't want to crash the whole server...
            // just log it; the logged information allows the problem to be
            // fixed at a later date
            Env.Log.error('HK_CREATE_EXPIRE_TASK', err);
            Env.Log.info('HK_INVALID_EXPIRE_TASK', JSON.stringify([metadata.expire, 'EXPIRE', channelName]));
        });
    }
};
|||
|
|||
/* handleGetHistory
 * handles a client's GET_HISTORY request:
 *   1. ACKs the request
 *   2. fetches (and sends) the channel's metadata if it exists, aborting
 *      if the channel has expired
 *   3. streams history from the client's lastKnownHash onward
 *   4. if the channel turned out to be brand new, initializes it with the
 *      client-supplied metadata via handleFirstMessage
 *   5. sends an end-of-history marker ({state: 1})
 */
const handleGetHistory = function (Env, Server, seq, userId, parsed) {
    const metadata_cache = Env.metadata_cache;
    const HISTORY_KEEPER_ID = Env.id;
    const Log = Env.Log;

    // parsed[1] is the channel id
    // parsed[2] is a validation key or an object containing metadata (optionnal)
    // parsed[3] is the last known hash (optionnal)

    Server.send(userId, [seq, 'ACK']);
    var channelName = parsed[1];
    var config = parsed[2];
    var metadata = {};
    var lastKnownHash;
    var txid;

    // clients can optionally pass a map of attributes
    // if the channel already exists this map will be ignored
    // otherwise it will be stored as the initial metadata state for the channel
    if (config && typeof config === "object" && !Array.isArray(parsed[2])) {
        lastKnownHash = config.lastKnownHash;
        metadata = config.metadata || {};
        txid = config.txid;
        if (metadata.expire) {
            // convert the client's relative expiration (seconds from now)
            // into an absolute timestamp in milliseconds
            metadata.expire = +metadata.expire * 1000 + (+new Date());
        }
    }
    metadata.channel = channelName;
    metadata.created = +new Date();

    // if the user sends us an invalid key, we won't be able to validate their messages
    // so they'll never get written to the log anyway. Let's just drop their message
    // on the floor instead of doing a bunch of extra work
    // TODO send them an error message so they know something is wrong
    if (metadata.validateKey && !isValidValidateKeyString(metadata.validateKey)) {
        return void Log.error('HK_INVALID_KEY', metadata.validateKey);
    }

    nThen(function (waitFor) {
        var w = waitFor();
        /* fetch the channel's metadata.
            use it to check if the channel has expired.
            send it to the client if it exists.
        */
        getMetadata(Env, channelName, waitFor(function (err, metadata) {
            if (err) {
                Env.Log.error('HK_GET_HISTORY_METADATA', {
                    channel: channelName,
                    error: err,
                });
                return void w();
            }
            // if there is no stored metadata there's no point in checking
            // expiration or sending it; just release 'w' and move on
            // (the user-supplied metadata may be written below instead)
            if (!metadata || !metadata.channel) { return w(); }
            // if there is already a metadata log then use it instead
            // of whatever the user supplied

            // NOTE(review): this check is unreachable — 'metadata' is known
            // to be truthy after the guard above; confirm and remove
            if (!metadata) { return void w(); }

            // And then check if the channel is expired. If it is, send the error and abort
            // FIXME this is hard to read because 'checkExpired' has side effects
            if (checkExpired(Env, Server, channelName)) { return void waitFor.abort(); }

            // always send metadata with GET_HISTORY requests
            Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify(metadata)], w);
        }));
    }).nThen(() => {
        let msgCount = 0;

        // TODO compute lastKnownHash in a manner such that it will always skip past the metadata line?
        getHistoryAsync(Env, channelName, lastKnownHash, false, (msg, readMore) => {
            msgCount++;
            // avoid sending the metadata message a second time
            if (isMetadataMessage(msg) && metadata_cache[channelName]) { return readMore(); }
            // tag each message with the client's transaction id, if any
            if (txid) { msg[0] = txid; }
            Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify(msg)], readMore);
        }, (err) => {
            if (err && err.code !== 'ENOENT') {
                // ENOENT (no such channel yet) is expected; anything else
                // except EINVAL is logged, and the error is relayed to the client
                if (err.message !== 'EINVAL') { Log.error("HK_GET_HISTORY", {
                    err: err && err.message,
                    stack: err && err.stack,
                }); }
                const parsedMsg = {error:err.message, channel: channelName, txid: txid};
                Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify(parsedMsg)]);
                return;
            }

            // brand-new channel (no stored messages, no cached metadata,
            // and the user is actually in it): initialize it with the
            // metadata the client supplied and echo that metadata back
            if (msgCount === 0 && !metadata_cache[channelName] && Server.channelContainsUser(channelName, userId)) {
                handleFirstMessage(Env, channelName, metadata);
                Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify(metadata)]);
            }

            // End of history message:
            let parsedMsg = {state: 1, channel: channelName, txid: txid};

            Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify(parsedMsg)]);
        });
    });
};
|||
|
|||
/* handleGetHistoryRange
 * handles a GET_HISTORY_RANGE request: validates the argument map
 * ({from, count, cpCount, txid}), ACKs, then asks the worker for the
 * older history and relays each message as HISTORY_RANGE followed by a
 * HISTORY_RANGE_END marker. Missing arguments are rejected with
 * INVALID_ARGS / UNSPECIFIED_COUNT / NO_TXID before any ACK.
 */
const handleGetHistoryRange = function (Env, Server, seq, userId, parsed) {
    const hkId = Env.id;
    const channelName = parsed[1];
    const map = parsed[2];

    if (!map || typeof(map) !== 'object') {
        return void Server.send(userId, [seq, 'ERROR', 'INVALID_ARGS', hkId]);
    }

    const oldestKnownHash = map.from;
    const desiredMessages = map.count;
    const desiredCheckpoint = map.cpCount;
    const txid = map.txid;

    // at least one of the two count forms must be provided
    const hasCount = typeof(desiredMessages) === 'number' ||
        typeof(desiredCheckpoint) === 'number';
    if (!hasCount) {
        return void Server.send(userId, [seq, 'ERROR', 'UNSPECIFIED_COUNT', hkId]);
    }

    if (!txid) {
        return void Server.send(userId, [seq, 'ERROR', 'NO_TXID', hkId]);
    }

    Server.send(userId, [seq, 'ACK']);
    Env.getOlderHistory(channelName, oldestKnownHash, desiredMessages, desiredCheckpoint, function (err, toSend) {
        // a missing channel is fine; anything else gets logged
        if (err && err.code !== 'ENOENT') {
            Env.Log.error("HK_GET_OLDER_HISTORY", err);
        }

        if (Array.isArray(toSend)) {
            toSend.forEach(function (msg) {
                Server.send(userId, [0, hkId, 'MSG', userId,
                    JSON.stringify(['HISTORY_RANGE', txid, msg])]);
            });
        }

        Server.send(userId, [0, hkId, 'MSG', userId,
            JSON.stringify(['HISTORY_RANGE_END', txid, channelName])
        ]);
    });
};
|||
|
|||
/* handleGetFullHistory
 * Streams the complete history of a channel to the requesting user,
 * wrapping each message as ['FULL_HISTORY', msg] and closing with
 * ['FULL_HISTORY_END', channel] (or ['ERROR', channel, message] on failure).
 *
 * parsed[1] is the channel id
 * parsed[2] is a validation key (optional)
 * parsed[3] is the last known hash (optional)
 */
const handleGetFullHistory = function (Env, Server, seq, userId, parsed) {
    const historyKeeperId = Env.id;
    const log = Env.Log;
    const channelName = parsed[1];

    // relay a JSON payload to the requesting user as a history-keeper message
    const relay = function (content, cb) {
        Server.send(userId, [0, historyKeeperId, 'MSG', userId, JSON.stringify(content)], cb);
    };

    Server.send(userId, [seq, 'ACK']);

    // FIXME should we send metadata here too?
    // none of the clientside code which uses this API needs metadata,
    // but it won't hurt to send it (2019-08-22)
    return void getHistoryAsync(Env, channelName, -1, false, function (msg, readMore) {
        relay(['FULL_HISTORY', msg], readMore);
    }, function (err) {
        if (err) {
            log.error('HK_GET_FULL_HISTORY', err.stack);
            return void relay(['ERROR', channelName, err.message]);
        }
        relay(['FULL_HISTORY_END', channelName]);
    });
};
|||
|
|||
// Protocol commands that the history keeper handles directly.
// Anything not listed here falls through to the RPC layer
// (see HK.onDirectMessage below).
const directMessageCommands = {
    GET_HISTORY: handleGetHistory,
    GET_HISTORY_RANGE: handleGetHistoryRange,
    GET_FULL_HISTORY: handleGetFullHistory,
};
|||
|
|||
/* onDirectMessage
 * exported for use by the netflux-server
 * parses and handles all direct messages addressed to the history keeper:
 *   - commands not in directMessageCommands are forwarded to the RPC layer
 *   - history commands are checked against the channel's metadata
 *     (expiration and access restriction) before being dispatched
 *   - checkExpired has side effects and may disconnect users of expired channels
 */
HK.onDirectMessage = function (Env, Server, seq, userId, json) {
    const Log = Env.Log;
    const HISTORY_KEEPER_ID = Env.id;
    Log.silly('HK_MESSAGE', json);

    // json[2] carries the client's command as a JSON-encoded array
    let parsed;
    try {
        parsed = JSON.parse(json[2]);
    } catch (err) {
        Log.error("HK_PARSE_CLIENT_MESSAGE", json);
        return;
    }

    // the first element names the command (GET_HISTORY, GET_HISTORY_RANGE, ...)
    var first = parsed[0];

    if (typeof(directMessageCommands[first]) !== 'function') {
        // it's either an unsupported command or an RPC call
        // either way, RPC has it covered
        return void handleRPC(Env, Server, seq, userId, parsed);
    }

    // otherwise it's some kind of history retrieval command...
    // go grab its metadata, because unfortunately people can ask for history
    // whether or not they have joined the channel, so we can't rely on JOIN restriction
    // to stop people from loading history they shouldn't see.
    var channelName = parsed[1];
    nThen(function (w) {
        getMetadata(Env, channelName, w(function (err, metadata) {
            if (err) {
                // stream errors?
                // we should log these, but if we can't load metadata
                // then it's probably not restricted or expired
                // it's not like anything else will recover from this anyway
                return;
            }

            // likewise, we can't do anything more here if there's no metadata
            // jump to the next block
            if (!metadata) { return; }

            // If the requested history is for an expired channel, abort
            // checkExpired has side effects and will disconnect users for you...
            if (checkExpired(Env, Server, parsed[1])) {
                // if the channel is expired just abort.
                w.abort();
                return;
            }

            // jump to handling the command if there's no restriction...
            if (!metadata.restricted) { return; }

            // check if the user is in the allow list...
            const allowed = HK.listAllowedUsers(metadata);
            const session = HK.getNetfluxSession(Env, userId);

            if (HK.isUserSessionAllowed(allowed, session)) {
                return;
            }

            /* Anyone in the userlist that isn't in the allow list should have already
              been kicked out of the channel. Likewise, disallowed users should not
              be able to add themselves to the userlist because JOIN commands respect
              access control settings. The error that is sent below protects against
              the remaining case, in which users try to get history without having
              joined the channel. Normally we'd send the allow list to tell them the
              key with which they should authenticate, but since we don't use this
              behaviour, I'm doing the easy thing and just telling them to GO AWAY.

              We can implement the more advanced behaviour later if it turns out that
              we need it. This check guards against all kinds of history
              access: GET_HISTORY, GET_HISTORY_RANGE, GET_FULL_HISTORY.
            */
            w.abort();
            return void Server.send(userId, [
                seq,
                'ERROR',
                'ERESTRICTED',
                HISTORY_KEEPER_ID
            ]);
        }));
    }).nThen(function () {
        // run the appropriate command from the map
        directMessageCommands[first](Env, Server, seq, userId, parsed);
    });
};
|||
|
|||
/* onChannelMessage
    Determine what we should store when a message is broadcast to a channel.

    * ignores ephemeral channels
    * ignores messages sent to expired channels
    * rejects duplicated checkpoints
    * validates messages to channels that have validation keys
    * caches the id of the last saved checkpoint
    * adds timestamps to incoming messages
    * writes messages to the store
*/
HK.onChannelMessage = function (Env, Server, channel, msgStruct) {
    const Log = Env.Log;

    // TODO our usage of 'channel' here looks prone to errors
    // we only use it for its 'id', but it can contain other stuff
    // also, we're using this RPC from both the RPC and Netflux-server
    // we should probably just change this to expect a channel id directly

    // don't store messages if the channel id indicates that it's an ephemeral message
    if (!channel.id || channel.id.length === EPHEMERAL_CHANNEL_LENGTH) { return; }

    // the payload (msgStruct[4]) of a checkpoint message starts with 'cp|'
    const isCp = /^cp\|/.test(msgStruct[4]);
    let id;
    if (isCp) {
        // id becomes either null or an array of regex match results...
        id = CHECKPOINT_PATTERN.exec(msgStruct[4]);
        if (Array.isArray(id) && id[2] && id[2] === channel.lastSavedCp) {
            // Reject duplicate checkpoints
            return;
        }
    }

    let metadata;
    nThen(function (w) {
        getMetadata(Env, channel.id, w(function (err, _metadata) {
            // if there's no channel metadata then it can't be an expiring channel
            // nor can we possibly validate it
            if (!_metadata) { return; }
            metadata = _metadata;

            // don't write messages to expired channels
            // (checkExpired has side effects and may disconnect users)
            if (checkExpired(Env, Server, channel)) { return void w.abort(); }
        }));
    }).nThen(function (w) {
        // if there's no validateKey present skip to the next block
        if (!(metadata && metadata.validateKey)) { return; }

        // trim the checkpoint indicator off the message if it's present
        let signedMsg = (isCp) ? msgStruct[4].replace(CHECKPOINT_PATTERN, '') : msgStruct[4];

        // verify the signature against the channel's validate key;
        // only valid messages may be stored
        Env.validateMessage(signedMsg, metadata.validateKey, w(function (err) {
            // no errors means success
            if (!err) { return; }
            // validation can fail in multiple ways
            if (err === 'FAILED') {
                // we log this case, but not others for some reason
                Log.info("HK_SIGNED_MESSAGE_REJECTED", 'Channel '+channel.id);
            }
            // always abort if there was an error...
            return void w.abort();
        }));
    }).nThen(function () {
        // do checkpoint stuff...

        // 1. get the checkpoint id
        // 2. reject duplicate checkpoints

        if (isCp) {
            // if the message is a checkpoint we will have already validated
            // that it isn't a duplicate. remember its id so that we can
            // repeat this process for the next incoming checkpoint

            // WARNING: the fact that we only check the most recent checkpoints
            // is a potential source of bugs if one editor has high latency and
            // pushes a duplicate of an earlier checkpoint than the latest which
            // has been pushed by editors with low latency
            // FIXME
            if (Array.isArray(id) && id[2]) {
                // Store new checkpoint hash
                channel.lastSavedCp = id[2];
            }
        }

        // add the time to the message
        msgStruct.push(now());

        // finally, persist the stringified message to the channel's store
        storeMessage(Env, channel, JSON.stringify(msgStruct), isCp, getHash(msgStruct[4], Log));
    });
};
|||
|
|||
|
|||
@ -1,7 +0,0 @@ |
|||
module.exports = function (f, g) { |
|||
return function () { |
|||
if (!f) { return; } |
|||
f.apply(this, Array.prototype.slice.call(arguments)); |
|||
f = g; |
|||
}; |
|||
}; |
|||
@ -0,0 +1,235 @@ |
|||
/* |
|||
|
|||
There are many situations where we want to do lots of little jobs |
|||
in parallel and with few constraints as to their ordering. |
|||
|
|||
One example is recursing over a bunch of directories and reading files. |
|||
The naive way to do this is to recurse over all the subdirectories |
|||
relative to a root while adding files to a list. Then to iterate over |
|||
the files in that list. Unfortunately, this means holding the complete |
|||
list of file paths in memory, which can't possibly scale as our database grows. |
|||
|
|||
A better way to do this is to recurse into one directory and |
|||
iterate over its contents until there are no more, then to backtrack |
|||
to the next directory and repeat until no more directories exist. |
|||
This kind of thing is easy enough when you perform one task at a time |
|||
and use synchronous code, but with multiple asynchronous tasks it's |
|||
easy to introduce subtle bugs. |
|||
|
|||
This module is designed for these situations. It allows you to easily |
|||
and efficiently schedule a large number of tasks with an associated |
|||
degree of priority from 0 (highest priority) to Number.MAX_SAFE_INTEGER. |
|||
|
|||
Initialize your scheduler with a degree of parallelism, and start planning |
|||
some initial jobs. Set it to run and it will keep going until all jobs are |
|||
complete, at which point it will optionally execute a 'done' callback. |
|||
|
|||
Getting back to the original example: |
|||
|
|||
List the contents of the root directory, then plan subsequent jobs |
|||
with a priority of 1 to recurse into subdirectories. The callback |
|||
of each of these recursions can then plan higher priority tasks |
|||
to actually process the contained files with a priority of 0. |
|||
|
|||
As long as there are more files scheduled it will continue to process |
|||
them first. When there are no more files the scheduler will read |
|||
the next directory and repopulate the list of files to process. |
|||
This will repeat until everything is done. |
|||
|
|||
// load the module
|
|||
const Plan = require("./plan"); |
|||
|
|||
// instantiate a scheduler with a parallelism of 5
|
|||
var plan = Plan(5) |
|||
|
|||
// plan the first job which schedules more jobs...
|
|||
.job(1, function (next) { |
|||
listRootDirectory(function (files) { |
|||
files.forEach(function (file) { |
|||
// highest priority, run as soon as there is a free worker
|
|||
plan.job(0, function (next) { |
|||
processFile(file, function (result) { |
|||
console.log(result); |
|||
// don't forget to call next
|
|||
next(); |
|||
}); |
|||
}); |
|||
}); |
|||
next(); // call 'next' to free up one worker
|
|||
}); |
|||
}) |
|||
// chain commands together if you want
|
|||
.done(function () { |
|||
console.log("DONE"); |
|||
}) |
|||
// it won't run unless you launch it
|
|||
.start(); |
|||
|
|||
*/ |
|||
|
|||
/* Plan: a tiny priority scheduler.
 * `max` bounds the number of jobs allowed to run concurrently (default 5).
 * Jobs live in `jobs[priority]`, a map of id => job function; lower
 * priority indexes are scanned first.
 */
module.exports = function (max) {
    var plan = {};
    max = max || 5;

    // finds an id that isn't in use in a particular map
    // accepts an id in case you have one already chosen
    // otherwise generates random new ids if one is not passed
    // or if there is a collision
    var uid = function (map, id) {
        if (typeof(id) === 'undefined') {
            id = Math.floor(Math.random() * Number.MAX_SAFE_INTEGER);
        }
        if (id && typeof(map[id]) === 'undefined') {
            return id;
        }
        // collision (or a falsy id such as 0): retry with a fresh random id
        return uid(map);
    };

    // the queue of jobs is a sparse array indexed by priority,
    // populated with maps (id => job function) for each level used
    var jobs = [];

    // the count of currently running jobs
    var count = 0;

    // a list of callbacks to be executed once everything is done
    var completeHandlers = [];

    // the recommended usage is to create a new scheduler for every job
    // use it for internals in a scope, and let the garbage collector
    // clean up when everything stops. This means you shouldn't
    // go passing 'plan' around in a long-lived process!
    var FINISHED = false;
    var done = function () {
        // 'done' gets called when there are no more jobs in the queue
        // but other jobs might still be running...

        // the count of running processes should never be less than zero
        // because we guard against multiple callbacks
        if (count < 0) { throw new Error("should never happen"); }
        // greater than zero is definitely possible, it just means you aren't done yet
        if (count !== 0) { return; }
        // you will finish twice if you call 'start' a second time
        // this behaviour isn't supported yet.
        if (FINISHED) { throw new Error('finished twice'); }
        FINISHED = true;
        // execute all your 'done' callbacks
        completeHandlers.forEach(function (f) { f(); });
    };

    // forward declaration: 'next' and 'run' are mutually recursive
    var run;

    // this 'next' is internal only.
    // it iterates over all known jobs, running them until
    // the scheduler achieves the desired amount of parallelism.
    // If there are no more jobs it will call 'done'
    // which will shortcircuit if there are still pending tasks.
    // Whenever any task finishes it will return its lock and
    // run as many new jobs as are allowed.
    var next = function () {
        // array.some skips over bare indexes in sparse arrays
        var pending = jobs.some(function (bag /*, priority*/) {
            if (!bag || typeof(bag) !== 'object') { return; }
            // a bag is a map of jobs for any particular degree of priority
            // iterate over jobs in the bag until you're out of 'workers'
            for (var id in bag) {
                // bail out if you hit max parallelism
                if (count >= max) { return true; }
                // (run only declares two parameters; the third is ignored)
                run(bag, id, next);
            }
        });
        // check whether you're done if you hit the end of the array
        if (!pending) { done(); }
    };

    // and here's the part that actually handles jobs...
    run = function (bag, id) {
        // this is just a sanity check.
        // there should only ever be jobs in each bag.
        if (typeof(bag[id]) !== 'function') {
            throw new Error("expected function");
        }

        // keep a local reference to the function
        var f = bag[id];
        // remove it from the bag.
        delete bag[id];
        // increment the count of running jobs
        count++;

        // guard against the job's 'next' being called twice.
        var called = false;
        f(function () {
            // watch out! it'll bite you.
            // maybe this should just return?
            // support that option for 'production' ?
            if (called) { throw new Error("called twice"); }
            // the code below is safe because we can't call back a second time
            called = true;

            // decrement the count of running jobs...
            count--;

            // and finally call next to replace this worker with more job(s)
            next();
        });
    };

    // this is exposed as API
    plan.job = function (priority, cb) {
        // you have to pass both the priority (a non-negative number) and an actual job
        if (typeof(priority) !== 'number' || priority < 0) { throw new Error('expected a non-negative number'); }
        // a job is an asynchronous function that takes a single parameter:
        // a 'next' callback which will keep the whole thing going.
        // forgetting to call 'next' means you'll never complete.
        if (typeof(cb) !== 'function') { throw new Error('expected function'); }

        // initialize the specified priority level if it doesn't already exist
        var bag = jobs[priority] = jobs[priority] || {};
        // choose a random id that isn't already in use for this priority level
        var id = uid(bag);

        // add the job to this priority level's bag
        // most (all?) javascript engines will append this job to the bottom
        // of the map. Meaning when we iterate it will be run later than
        // other jobs that were scheduled first, effectively making a FIFO queue.
        // However, this is undefined behaviour and you shouldn't ever rely on it.
        bag[id] = function (next) {
            cb(next);
        };
        // returning 'plan' lets us chain methods together.
        return plan;
    };

    var started = false;
    plan.start = function () {
        // don't allow multiple starts
        // even though it should work, it's simpler not to.
        if (started) { return plan; }
        // this seems to imply a 'stop' method
        // but I don't need it, so I'm not implementing it now --ansuz
        started = true;

        // start asynchronously, otherwise jobs will start running
        // before you've had a chance to return 'plan', and weird things
        // happen.
        setTimeout(function () {
            next();
        });
        return plan;
    };

    // you can pass any number of functions to be executed
    // when all pending jobs are complete.
    // We don't pass any arguments, so you need to handle return values
    // yourself if you want them.
    plan.done = function (f) {
        if (typeof(f) !== 'function') { throw new Error('expected function'); }
        completeHandlers.push(f);
        return plan;
    };

    // That's all! I hope you had fun reading this!
    return plan;
};
|||
|
|||
@ -0,0 +1,216 @@ |
|||
/*jshint esversion: 6 */ |
|||
const Util = require("./common-util"); |
|||
|
|||
const Core = require("./commands/core"); |
|||
const Admin = require("./commands/admin-rpc"); |
|||
const Pinning = require("./commands/pin-rpc"); |
|||
const Quota = require("./commands/quota"); |
|||
const Block = require("./commands/block"); |
|||
const Metadata = require("./commands/metadata"); |
|||
const Channel = require("./commands/channel"); |
|||
const Upload = require("./commands/upload"); |
|||
const HK = require("./hk-util"); |
|||
|
|||
var RPC = module.exports; |
|||
|
|||
// RPC calls that may be made without a signature or session cookie.
// Each handler is invoked as (Env, args, callback, Server, netfluxId)
// by handleUnauthenticatedMessage below.
const UNAUTHENTICATED_CALLS = {
    GET_FILE_SIZE: Pinning.getFileSize,
    GET_MULTIPLE_FILE_SIZE: Pinning.getMultipleFileSize,
    GET_DELETED_PADS: Pinning.getDeletedPads,
    IS_CHANNEL_PINNED: Pinning.isChannelPinned, // FIXME drop this RPC
    IS_NEW_CHANNEL: Channel.isNewChannel,
    WRITE_PRIVATE_MESSAGE: Channel.writePrivateMessage,
    GET_METADATA: Metadata.getMetadata,
};
|||
|
|||
// true when `msg` is a two-element [command, args] pair whose command
// is one of the cookie-free calls in UNAUTHENTICATED_CALLS.
// (a falsy `msg` is returned as-is, matching short-circuit semantics)
var isUnauthenticateMessage = function (msg) {
    if (!msg) { return msg; }
    return msg.length === 2 &&
        typeof(UNAUTHENTICATED_CALLS[msg[0]]) === 'function';
};
|||
|
|||
// Dispatch a cookie-free RPC to its handler from UNAUTHENTICATED_CALLS
// and wrap the reply so it matches the authenticated response shape.
var handleUnauthenticatedMessage = function (Env, msg, respond, Server, netfluxId) {
    var type = msg[0];
    Env.Log.silly('LOG_RPC', type);

    UNAUTHENTICATED_CALLS[type](Env, msg[1], function (err, value) {
        if (err) {
            Env.WARN(err, msg[1]);
            return void respond(err);
        }
        // pad the reply to mirror the [cookie, value, ...] format
        respond(err, [null, value, null]);
    }, Server, netfluxId);
};
|||
|
|||
// Authenticated RPC calls that take an extra argument targeting the
// caller's own data. Each handler is invoked as
// (Env, safeKey, msg[1], callback, Server) by handleAuthenticatedMessage.
const AUTHENTICATED_USER_TARGETED = {
    RESET: Pinning.resetUserPins,
    PIN: Pinning.pinChannel,
    UNPIN: Pinning.unpinChannel,
    CLEAR_OWNED_CHANNEL: Channel.clearOwnedChannel,
    REMOVE_OWNED_CHANNEL: Channel.removeOwnedChannel,
    TRIM_HISTORY: Channel.trimHistory,
    UPLOAD_STATUS: Upload.status,
    UPLOAD: Upload.upload,
    UPLOAD_COMPLETE: Upload.complete,
    UPLOAD_CANCEL: Upload.cancel,
    OWNED_UPLOAD_COMPLETE: Upload.complete_owned,
    WRITE_LOGIN_BLOCK: Block.writeLoginBlock,
    REMOVE_LOGIN_BLOCK: Block.removeLoginBlock,
    ADMIN: Admin.command,
    SET_METADATA: Metadata.setMetadata,
};
|||
|
|||
// Authenticated RPC calls scoped to the caller's session only.
// Each handler is invoked as (Env, safeKey, callback) with no
// extra argument (see handleAuthenticatedMessage).
const AUTHENTICATED_USER_SCOPED = {
    GET_HASH: Pinning.getHash,
    GET_TOTAL_SIZE: Pinning.getTotalSize,
    UPDATE_LIMITS: Quota.getUpdatedLimit,
    GET_LIMIT: Pinning.getLimit,
    EXPIRE_SESSION: Core.expireSessionAsync,
    REMOVE_PINS: Pinning.removePins,
    TRIM_PINS: Pinning.trimPins,
    COOKIE: Core.haveACookie,
};
|||
|
|||
// true when `call` is one of the calls requiring a validated signature.
// UPLOAD is excluded: it is dispatched without signature validation
// (see the special case in `rpc` below).
var isAuthenticatedCall = function (call) {
    if (call === 'UPLOAD') { return false; }
    if (typeof(AUTHENTICATED_USER_TARGETED[call]) === 'function') { return true; }
    return typeof(AUTHENTICATED_USER_SCOPED[call]) === 'function';
};
|||
|
|||
/* handleAuthenticatedMessage
 * Dispatches an RPC whose cookie (and, except for UPLOAD, signature) has
 * already been validated. `unsafeKey` is the caller's public key as
 * transmitted; it is escaped into `safeKey` for use as a map key.
 * Every response is prefixed with a fresh session cookie.
 */
var handleAuthenticatedMessage = function (Env, unsafeKey, msg, respond, Server) {
    /* If you have gotten this far, you have signed the message with the
        public key which you provided.
    */

    var safeKey = Util.escapeKeyCharacters(unsafeKey);

    // wrap `respond` so every reply carries the latest session cookie
    var Respond = function (e, value) {
        var session = Env.Sessions[safeKey];
        // use the most recently issued token, or '' if no session exists
        var token = session? session.tokens.slice(-1)[0]: '';
        var cookie = Core.makeCookie(token).join('|');
        respond(e ? String(e): e, [cookie].concat(typeof(value) !== 'undefined' ?value: []));
    };

    msg.shift();
    // discard validated cookie from message
    if (!msg.length) {
        return void Respond('INVALID_MSG');
    }

    var TYPE = msg[0];

    Env.Log.silly('LOG_RPC', TYPE);

    // calls that take an extra argument (msg[1]) targeting the user's data
    if (typeof(AUTHENTICATED_USER_TARGETED[TYPE]) === 'function') {
        return void AUTHENTICATED_USER_TARGETED[TYPE](Env, safeKey, msg[1], function (e, value) {
            Env.WARN(e, value);
            return void Respond(e, value);
        }, Server);
    }

    // calls scoped to the user's session; no extra argument
    if (typeof(AUTHENTICATED_USER_SCOPED[TYPE]) === 'function') {
        return void AUTHENTICATED_USER_SCOPED[TYPE](Env, safeKey, function (e, value) {
            if (e) {
                Env.WARN(e, safeKey);
                return void Respond(e);
            }
            Respond(e, value);
        });
    }

    return void Respond('UNSUPPORTED_RPC_CALL', msg);
};
|||
|
|||
/* rpc
 * Entry point for every RPC message received from a client.
 * `data` is expected to be a one-element array wrapping the actual message.
 * Unauthenticated calls are dispatched directly; authenticated calls have
 * their cookie checked and (except for UPLOAD) their signature validated
 * before dispatch. All failures are reported through `respond(errorString)`.
 */
var rpc = function (Env, Server, userId, data, respond) {
    if (!Array.isArray(data)) {
        // log label fixed: was previously misspelled 'INVALID_ARG_FORMET',
        // which made it impossible to grep logs for the error actually sent
        Env.Log.debug('INVALID_ARG_FORMAT', data);
        return void respond('INVALID_ARG_FORMAT');
    }

    if (!data.length) {
        return void respond("INSUFFICIENT_ARGS");
    } else if (data.length !== 1) {
        // tolerated, but unexpected: only the first element is used
        Env.Log.debug('UNEXPECTED_ARGUMENTS_LENGTH', data);
    }

    // copy the message so that `shift` below doesn't mutate the caller's array
    var msg = data[0].slice(0);

    if (!Array.isArray(msg)) {
        return void respond('INVALID_ARG_FORMAT');
    }

    if (isUnauthenticateMessage(msg)) {
        return handleUnauthenticatedMessage(Env, msg, respond, Server, userId);
    }

    var signature = msg.shift();
    var publicKey = msg.shift();

    // make sure a user object is initialized in the cookie jar
    // (getSession creates the session as a side effect; its return value is unused)
    if (publicKey) {
        Core.getSession(Env.Sessions, publicKey);
    } else {
        Env.Log.debug("NO_PUBLIC_KEY_PROVIDED", publicKey);
    }

    var cookie = msg[0];
    if (!Core.isValidCookie(Env.Sessions, publicKey, cookie)) {
        // no cookie is fine if the RPC is to get a cookie
        if (msg[1] !== 'COOKIE') {
            return void respond('NO_COOKIE');
        }
    }

    var serialized = JSON.stringify(msg);

    if (!(serialized && typeof(publicKey) === 'string')) {
        return void respond('INVALID_MESSAGE_OR_PUBLIC_KEY');
    }

    var command = msg[1];

    if (command === 'UPLOAD') {
        // UPLOAD is a special case that skips signature validation
        // intentional fallthrough behaviour
        return void handleAuthenticatedMessage(Env, publicKey, msg, respond, Server);
    }
    if (isAuthenticatedCall(command)) {
        // check the signature on the message
        // refuse the command if it doesn't validate
        return void Env.checkSignature(serialized, signature, publicKey, function (err) {
            if (err) {
                return void respond("INVALID_SIGNATURE_OR_PUBLIC_KEY");
            }
            // remember that this netflux session has proven ownership of the key
            HK.authenticateNetfluxSession(Env, userId, publicKey);
            return void handleAuthenticatedMessage(Env, publicKey, msg, respond, Server);
        });
    }
    Env.Log.warn('INVALID_RPC_CALL', command);
    return void respond("INVALID_RPC_CALL");
};
|||
|
|||
/* RPC.create
 * Initializes the RPC subsystem: applies configured quota limits,
 * refreshes cached limits immediately and then once per day, expires
 * stale sessions on a timer, and hands back (via `cb`) the function
 * that routes incoming RPC messages, with a top-level error trap.
 */
RPC.create = function (Env, cb) {
    var Sessions = Env.Sessions;

    // refresh the cached quota limits; failures are logged, not fatal
    var refreshLimits = function () {
        Quota.updateCachedLimits(Env, function (err) {
            if (err) { Env.WARN('limitUpdate', err); }
        });
    };
    Quota.applyCustomLimits(Env);
    refreshLimits();
    Env.intervals.dailyLimitUpdate = setInterval(refreshLimits, 24 * 3600 * 1000);

    // expire old sessions once per minute
    Env.intervals.sessionExpirationInterval = setInterval(function () {
        Core.expireSessions(Sessions);
    }, Core.SESSION_EXPIRATION_TIME);

    cb(void 0, function (Server, userId, data, respond) {
        try {
            return rpc(Env, Server, userId, data, respond);
        } catch (e) {
            // guard against handler bugs taking down the server
            console.log("Error from RPC with data " + JSON.stringify(data));
            console.log(e.stack);
        }
    });
};
|||
@ -0,0 +1,172 @@ |
|||
var WriteQueue = require("./write-queue"); |
|||
var Util = require("./common-util"); |
|||
|
|||
/* This module provides implements a FIFO scheduler |
|||
which assumes the existence of three types of async tasks: |
|||
|
|||
1. ordered tasks which must be executed sequentially |
|||
2. unordered tasks which can be executed in parallel |
|||
3. blocking tasks which must block the execution of all other tasks |
|||
|
|||
The scheduler assumes there will be many resources identified by strings, |
|||
and that the constraints described above will only apply in the context |
|||
of identical string ids. |
|||
|
|||
Many blocking tasks may be executed in parallel so long as they |
|||
concern resources identified by different ids. |
|||
|
|||
USAGE: |
|||
|
|||
const schedule = require("./schedule")(); |
|||
|
|||
// schedule two sequential tasks using the resource 'pewpew'
|
|||
schedule.ordered('pewpew', function (next) { |
|||
appendToFile('beep\n', next); |
|||
}); |
|||
schedule.ordered('pewpew', function (next) { |
|||
appendToFile('boop\n', next); |
|||
}); |
|||
|
|||
// schedule a task that can happen whenever
|
|||
schedule.unordered('pewpew', function (next) { |
|||
displayFileSize(next); |
|||
}); |
|||
|
|||
// schedule a blocking task which will wait
|
|||
// until the all unordered tasks have completed before commencing
|
|||
schedule.blocking('pewpew', function (next) { |
|||
deleteFile(next); |
|||
}); |
|||
|
|||
// this will be queued for after the blocking task
|
|||
schedule.ordered('pewpew', function (next) { |
|||
appendFile('boom', next); |
|||
}); |
|||
|
|||
*/ |
|||
|
|||
// return a uid which is not already in a map
|
|||
// Return a uid which is not already present in `set`
// (`set` is a map whose keys are previously issued uids).
var unusedUid = function (set) {
    var uid = Util.uid();
    // On collision, retry with the SAME set.
    // (Previously this recursed as `unusedUid()` without an argument,
    // which would throw a TypeError the first time a collision occurred.)
    if (set[uid]) { return unusedUid(set); }
    return uid;
};
|||
|
|||
// return an existing session, creating one if it does not already exist
|
|||
// Return the session for `id` from `map`, creating it on first use.
var lookup = function (map, id) {
    if (!map[id]) {
        map[id] = {
            active: {},  // currently-running unordered tasks
            blocked: {}, // unordered tasks deferred behind a blocking task
        };
    }
    return map[id];
};
|||
|
|||
// true when `map` has no own enumerable keys
var isEmpty = function (map) {
    return Object.keys(map).length === 0;
};
|||
|
|||
module.exports = function () { |
|||
// every scheduler instance has its own queue
|
|||
var queue = WriteQueue(); |
|||
|
|||
// ordered tasks don't require any extra logic
|
|||
var Ordered = function (id, task) { |
|||
queue(id, task); |
|||
}; |
|||
|
|||
// unordered and blocking tasks need a little extra state
|
|||
var map = {}; |
|||
|
|||
// regular garbage collection keeps memory consumption low
|
|||
var collectGarbage = function (id) { |
|||
// avoid using 'lookup' since it creates a session implicitly
|
|||
var local = map[id]; |
|||
// bail out if no session
|
|||
if (!local) { return; } |
|||
// bail out if there are blocking or active tasks
|
|||
if (local.lock) { return; } |
|||
if (!isEmpty(local.active)) { return; } |
|||
// if there are no pending actions then delete the session
|
|||
delete map[id]; |
|||
}; |
|||
|
|||
// unordered tasks run immediately if there are no blocking tasks scheduled
|
|||
// or immediately after blocking tasks finish
|
|||
var runImmediately = function (local, task) { |
|||
// set a flag in the map of active unordered tasks
|
|||
// to prevent blocking tasks from running until you finish
|
|||
var uid = unusedUid(local.active); |
|||
local.active[uid] = true; |
|||
|
|||
task(function () { |
|||
// remove the flag you set to indicate that your task completed
|
|||
delete local.active[uid]; |
|||
// don't do anything if other unordered tasks are still running
|
|||
if (!isEmpty(local.active)) { return; } |
|||
// bail out if there are no blocking tasks scheduled or ready
|
|||
if (typeof(local.waiting) !== 'function') { |
|||
return void collectGarbage(); |
|||
} |
|||
setTimeout(local.waiting); |
|||
}); |
|||
}; |
|||
|
|||
var runOnceUnblocked = function (local, task) { |
|||
var uid = unusedUid(local.blocked); |
|||
local.blocked[uid] = function () { |
|||
runImmediately(local, task); |
|||
}; |
|||
}; |
|||
|
|||
// 'unordered' tasks are scheduled to run in after the most recently received blocking task
|
|||
// or immediately and in parallel if there are no blocking tasks scheduled.
|
|||
var Unordered = function (id, task) { |
|||
var local = lookup(map, id); |
|||
if (local.lock) { return runOnceUnblocked(local, task); } |
|||
runImmediately(local, task); |
|||
}; |
|||
|
|||
var runBlocked = function (local) { |
|||
for (var task in local.blocked) { |
|||
runImmediately(local, local.blocked[task]); |
|||
} |
|||
}; |
|||
|
|||
// 'blocking' tasks must be run alone.
// They are queued alongside ordered tasks,
// and wait until any running 'unordered' tasks complete before commencing.
var Blocking = function (id, task) {
    var local = lookup(map, id);

    queue(id, function (next) {
        // start right away if there are no running unordered tasks
        if (isEmpty(local.active)) {
            local.lock = true;
            return void task(function () {
                // release the lock, launch any unordered tasks parked while
                // we held it, then let the ordered queue continue
                delete local.lock;
                runBlocked(local);
                next();
            });
        }
        // otherwise wait until the running tasks have completed
        // (runImmediately schedules local.waiting once the last one finishes)
        local.waiting = function () {
            local.lock = true;
            task(function () {
                delete local.lock;
                delete local.waiting;
                runBlocked(local);
                next();
            });
        };
    });
};
|||
|
|||
return { |
|||
ordered: Ordered, |
|||
unordered: Unordered, |
|||
blocking: Blocking, |
|||
}; |
|||
}; |
|||
@ -0,0 +1,628 @@ |
|||
/* globals Buffer */ |
|||
var Fs = require("fs"); |
|||
var Fse = require("fs-extra"); |
|||
var Path = require("path"); |
|||
|
|||
var BlobStore = module.exports; |
|||
var nThen = require("nthen"); |
|||
var Semaphore = require("saferphore"); |
|||
var Util = require("../common-util"); |
|||
|
|||
// A 'safe key' is the 44-character path-safe encoding of a user's public
// key; reject anything containing '/' so it cannot escape its directory.
var isValidSafeKey = function (safeKey) {
    if (typeof(safeKey) !== 'string') { return false; }
    if (safeKey.length !== 44) { return false; }
    return !/\//.test(safeKey);
};

// A blob id is exactly 48 lowercase hexadecimal characters.
var isValidId = function (id) {
    if (typeof(id) !== 'string') { return false; }
    if (id.length !== 48) { return false; }
    return /^[a-f0-9]+$/.test(id);
};
|||
|
|||
// helpers
|
|||
|
|||
// Prefix a path with the archive root, preserving its structure beneath it.
var prependArchive = function (Env, path) {
    return Path.join(Env.archivePath, path);
};

// Blobs live at: <blobPath>/<first two chars of blobId>/<blobId>
var makeBlobPath = function (Env, blobId) {
    var prefix = blobId.slice(0, 2);
    return Path.join(Env.blobPath, prefix, blobId);
};

// In-progress uploads are staged at:
// <blobStagingPath>/<first two chars of safeKey>/<safeKey>
var makeStagePath = function (Env, safeKey) {
    var prefix = safeKey.slice(0, 2);
    return Path.join(Env.blobStagingPath, prefix, safeKey);
};

// Ownership proofs live at:
// <blobPath>/<first three chars of safeKey>/<safeKey>/<first two chars of blobId>/<blobId>
var makeProofPath = function (Env, safeKey, blobId) {
    return Path.join(
        Env.blobPath,
        safeKey.slice(0, 3),
        safeKey,
        blobId.slice(0, 2),
        blobId
    );
};

// Recover the safeKey and blobId from a proof path built by makeProofPath.
var parseProofPath = function (path) {
    var parts = path.split('/');
    var last = parts.length - 1;
    return {
        blobId: parts[last],
        safeKey: parts[last - 2],
    };
};
|||
|
|||
// Report the size in bytes of an uploaded blob.
// A missing file is reported as zero bytes rather than an error, because a
// deleted upload effectively occupies no space.
// Used by: getFileSize
var getUploadSize = function (Env, blobId, cb) {
    var path = makeBlobPath(Env, blobId);
    if (!path) { return cb('INVALID_UPLOAD_ID'); }
    Fs.stat(path, function (err, stats) {
        if (!err) { return void cb(void 0, stats.size); }
        // if a file was deleted, its size is 0 bytes
        if (err.code === 'ENOENT') { return void cb(void 0, 0); }
        cb(err.code);
    });
};

// Call back with a boolean indicating whether `filePath` exists and is a
// regular file. A missing path is not an error: it yields `false`.
// Used by: removeOwnedBlob, uploadComplete, uploadStatus
var isFile = function (filePath, cb) {
    Fs.stat(filePath, function (e, stats) {
        if (!e) { return void cb(void 0, stats.isFile()); }
        if (e.code === 'ENOENT') { return void cb(void 0, false); }
        cb(e.message);
    });
};
|||
|
|||
// Open an append-mode write stream at `full`, creating parent directories
// as needed. Calls back exactly once with (error) or (undefined, stream);
// the callback is wrapped so callers always observe asynchronous behavior.
var makeFileStream = function (full, _cb) {
    var cb = Util.once(Util.mkAsync(_cb));
    Fse.mkdirp(Path.dirname(full), function (e) {
        if (e || !full) { // !full for pleasing flow, it's already checked
            return void cb(e ? e.message : 'INTERNAL_ERROR');
        }

        try {
            var stream = Fs.createWriteStream(full, {
                flags: 'a',
                encoding: 'binary',
                highWaterMark: Math.pow(2, 16), // 64KiB write buffer
            });
            // report success only once the fd is actually open
            stream.on('open', function () {
                cb(void 0, stream);
            });
            stream.on('error', function (err) {
                cb(err);
            });
        } catch (err) {
            cb('BAD_STREAM');
        }
    });
};
|||
|
|||
/********** METHODS **************/ |
|||
|
|||
// Append a base64-encoded chunk to the user's staging file, opening the
// staging stream on the first chunk. Sessions must have been initialized
// by a prior size check (currentUploadSize / pendingUploadSize), otherwise
// the chunk is rejected. Calls back with (error) or (undefined, byteCount).
var upload = function (Env, safeKey, content, cb) {
    var buffer;
    try { buffer = Buffer.from(content, 'base64'); }
    catch (e) { return void cb('DECODE_BUFFER'); }

    var session = Env.getSession(safeKey);

    // improperly initialized... maybe they didn't check before uploading?
    // reject it, just in case
    if (typeof(session.currentUploadSize) !== 'number' ||
        typeof(session.pendingUploadSize) !== 'number') {
        return cb('NOT_READY');
    }

    if (session.currentUploadSize > session.pendingUploadSize) {
        return cb('E_OVER_LIMIT');
    }

    // write the chunk and account for its size (shared by both branches;
    // the previous version duplicated this code)
    var writeChunk = function () {
        session.blobstage.write(buffer);
        session.currentUploadSize += buffer.length;
        cb(void 0, buffer.length);
    };

    if (session.blobstage) { return void writeChunk(); }

    // first chunk of this upload: open the staging stream before writing
    makeFileStream(makeStagePath(Env, safeKey), function (e, stream) {
        if (!stream) { return void cb(e); }
        session.blobstage = stream;
        writeChunk();
    });
};
|||
|
|||
// Abort an in-progress upload: reset the session's accounting (fileSize
// becomes the new pending size so the client may retry), close any open
// staging stream, and delete the partial staging file.
var upload_cancel = function (Env, safeKey, fileSize, cb) {
    var session = Env.getSession(safeKey);
    session.pendingUploadSize = fileSize;
    session.currentUploadSize = 0;

    var stage = session.blobstage;
    if (stage) {
        stage.close();
        delete session.blobstage;
    }

    Fs.unlink(makeStagePath(Env, safeKey), function (e) {
        if (e) { return void cb('E_UNLINK'); }
        cb(void 0);
    });
};
|||
|
|||
// Finalize a pending (unowned) upload: close the staging stream and move
// the staged file into its final location under the blob root.
// Calls back exactly once, with (error) or (undefined, blobId), only after
// the move has completed.
//
// Fixes relative to the previous version:
// - the mkdirp and isFile callbacks are now wrapped with w(), so each
//   nThen barrier actually waits for its async work before proceeding
// - cb is no longer invoked before the move (`cb(void 0, newPath, id)`),
//   which previously fired a premature callback and then a second one
//   after the move
var upload_complete = function (Env, safeKey, id, cb) {
    var session = Env.getSession(safeKey);

    // the file has already been uploaded to the staging area;
    // close the pending writestream if it is still open
    if (session.blobstage && session.blobstage.close) {
        session.blobstage.close();
        delete session.blobstage;
    }

    var oldPath = makeStagePath(Env, safeKey);
    var newPath = makeBlobPath(Env, id);

    nThen(function (w) {
        // make sure the path to your final location exists
        Fse.mkdirp(Path.dirname(newPath), w(function (e) {
            if (e) {
                w.abort();
                return void cb('RENAME_ERR');
            }
        }));
    }).nThen(function (w) {
        // make sure there's not already something in that exact location
        isFile(newPath, w(function (e, yes) {
            if (e) {
                w.abort();
                return void cb(e);
            }
            if (yes) {
                w.abort();
                return void cb('RENAME_ERR');
            }
        }));
    }).nThen(function () {
        // finally, move the staged file to its new path
        // FIXME we could just move and handle the EEXISTS instead of the above block
        Fse.move(oldPath, newPath, function (e) {
            if (e) { return void cb('RENAME_ERR'); }
            cb(void 0, id);
        });
    });
};
|||
|
|||
// Check whether a candidate blob id is free to use at `path`.
// Calls back with no arguments if nothing exists there, 'EEXISTS' if a
// readable/writable entry is already present, or the error code of any
// other failure.
var tryId = function (path, cb) {
    Fs.access(path, Fs.constants.R_OK | Fs.constants.W_OK, function (e) {
        if (!e) {
            // something accessible already lives there: the id is taken
            return void cb('EEXISTS');
        }
        if (e.code === 'ENOENT') {
            // no entry, so it's safe for us to proceed
            return void cb();
        }
        // it failed in an unexpected way; report its code
        return void cb(e.code);
    });
};
|||
|
|||
// owned_upload_complete
// Finalize an *owned* upload: move the staged file into the blob root and
// create an empty 'proof' file under the uploader's safeKey, so ownership
// can be verified later. Calls back with (error) or (undefined, blobId).
var owned_upload_complete = function (Env, safeKey, id, cb) {
    var session = Env.getSession(safeKey);

    // the file has already been uploaded to the staging area
    // close the pending writestream
    if (session.blobstage && session.blobstage.close) {
        session.blobstage.close();
        delete session.blobstage;
    }

    if (!isValidId(id)) {
        //WARN('ownedUploadComplete', "id is invalid");
        return void cb('EINVAL_ID');
    }

    var oldPath = makeStagePath(Env, safeKey);
    if (typeof(oldPath) !== 'string') {
        return void cb('EINVAL_CONFIG');
    }

    var finalPath = makeBlobPath(Env, id);

    var finalOwnPath = makeProofPath(Env, safeKey, id);

    // the user wants to move it into blob and create a empty file with the same id
    // in their own space:
    // /blob/safeKeyPrefix/safeKey/blobPrefix/blobID

    nThen(function (w) {
        // make the requisite directory structure using Mkdirp
        Fse.mkdirp(Path.dirname(finalPath), w(function (e /*, path */) {
            if (e) { // does not throw error if the directory already existed
                w.abort();
                return void cb(e.code);
            }
        }));
        Fse.mkdirp(Path.dirname(finalOwnPath), w(function (e /*, path */) {
            if (e) { // does not throw error if the directory already existed
                w.abort();
                return void cb(e.code);
            }
        }));
    }).nThen(function (w) {
        // make sure the id does not collide with another
        tryId(finalPath, w(function (e) {
            if (e) {
                w.abort();
                return void cb(e);
            }
        }));
    }).nThen(function (w) {
        // Create the empty file proving ownership
        Fs.writeFile(finalOwnPath, '', w(function (e) {
            if (e) {
                w.abort();
                return void cb(e.code);
            }
            // otherwise it worked...
        }));
    }).nThen(function (w) {
        // move the existing file to its new path
        Fse.move(oldPath, finalPath, w(function (e) {
            if (e) {
                // if there's an error putting the file into its final location...
                // ... you should remove the ownership file
                Fs.unlink(finalOwnPath, function () {
                    // but if you can't, it's not catastrophic
                    // we can clean it up later
                });
                w.abort();
                return void cb(e.code);
            }
            // otherwise it worked...
        }));
    }).nThen(function () {
        // clean up their session when you're done
        // call back with the blob id...
        cb(void 0, id);
    });
};
|||
|
|||
// Delete a blob from the live blob root. TODO COLDSTORAGE
var remove = function (Env, blobId, cb) {
    Fs.unlink(makeBlobPath(Env, blobId), cb);
};

// Delete the ownership proof a user holds for a blob.
var removeProof = function (Env, safeKey, blobId, cb) {
    Fs.unlink(makeProofPath(Env, safeKey, blobId), cb);
};

// Call back with a boolean: does `safeKey` hold an ownership proof
// for `blobId`?
var isOwnedBy = function (Env, safeKey, blobId, cb) {
    isFile(makeProofPath(Env, safeKey, blobId), cb);
};
|||
|
|||
|
|||
// Move a blob out of the live blob root into the archive, replacing any
// previously archived copy.
var archiveBlob = function (Env, blobId, cb) {
    var livePath = makeBlobPath(Env, blobId);
    Fse.move(livePath, prependArchive(Env, livePath), { overwrite: true }, cb);
};

// Permanently delete a blob that has already been archived.
var removeArchivedBlob = function (Env, blobId, cb) {
    Fs.unlink(prependArchive(Env, makeBlobPath(Env, blobId)), cb);
};

// Move an archived blob back into the live blob root. Unlike archiveBlob
// this does not pass {overwrite}, so restoring over a live blob fails.
var restoreBlob = function (Env, blobId, cb) {
    var livePath = makeBlobPath(Env, blobId);
    Fse.move(prependArchive(Env, livePath), livePath, cb);
};

// Move an ownership proof into the archive, replacing any archived copy.
var archiveProof = function (Env, safeKey, blobId, cb) {
    var livePath = makeProofPath(Env, safeKey, blobId);
    Fse.move(livePath, prependArchive(Env, livePath), { overwrite: true }, cb);
};

// Permanently delete an archived ownership proof.
var removeArchivedProof = function (Env, safeKey, blobId, cb) {
    Fs.unlink(prependArchive(Env, makeProofPath(Env, safeKey, blobId)), cb);
};

// Move an archived ownership proof back into the live blob tree.
var restoreProof = function (Env, safeKey, blobId, cb) {
    var livePath = makeProofPath(Env, safeKey, blobId);
    Fse.move(prependArchive(Env, livePath), livePath, cb);
};
|||
|
|||
// Build a bounded-concurrency recursive filesystem walker.
// `handleChild(err, path, next)` is invoked for every non-directory entry;
// the handler must call next() to release its slot. `done()` fires once
// every queued job has completed. At most `n` (minimum 2) jobs run at once.
var makeWalker = function (n, handleChild, done) {
    if (!n || typeof(n) !== 'number' || n < 2) { n = 2; }

    var W;
    nThen(function (w) {
        // this asynchronous bit defers the completion of this block until
        // synchronous execution has completed. This means you must create
        // the walker and start using it synchronously or else it will call back
        // prematurely
        setTimeout(w());
        W = w;
    }).nThen(function () {
        done();
    });

    // do no more than n jobs at a time
    var tasks = Semaphore.create(n);

    var recurse = function (path) {
        tasks.take(function (give) {
            // `next` releases both the semaphore slot and the outer barrier
            var next = give(W());

            nThen(function (w) {
                // check if the path is a directory...
                Fs.stat(path, w(function (err, stats) {
                    if (err) { return next(); }
                    if (!stats.isDirectory()) {
                        w.abort();
                        return void handleChild(void 0, path, next);
                    }
                    // fall through
                }));
            }).nThen(function () {
                // handle directories
                Fs.readdir(path, function (err, dir) {
                    if (err) { return next(); }
                    // everything is fine and it's a directory...
                    dir.forEach(function (d) {
                        recurse(Path.join(path, d));
                    });
                    next();
                });
            });
        });
    };

    return recurse;
};
|||
|
|||
// Iterate over every ownership proof beneath `root`, calling
// `handler(err, info, next)` with the parsed { path, blobId, safeKey } and
// the file's atime/ctime/mtime for each proof found. The handler must call
// next() to continue; cb() fires once the walk completes.
var listProofs = function (root, handler, cb) {
    Fs.readdir(root, function (err, dir) {
        if (err) { return void cb(err); }

        var walk = makeWalker(20, function (err, path, next) {
            // path is the path to a child node on the filesystem
            // next handles the next job in a queue

            // iterate over proofs
            // check for presence of corresponding files
            Fs.stat(path, function (err, stats) {
                if (err) {
                    return void handler(err, void 0, next);
                }

                var parsed = parseProofPath(path);
                handler(void 0, {
                    path: path,
                    blobId: parsed.blobId,
                    safeKey: parsed.safeKey,
                    atime: stats.atime,
                    ctime: stats.ctime,
                    mtime: stats.mtime,
                }, next);
            });
        }, function () {
            // called when there are no more directories or children to process
            cb();
        });

        dir.forEach(function (d) {
            // proofs live under three-character safeKey-prefix directories;
            // ignore directories that aren't 3 characters long...
            if (d.length !== 3) { return; }
            walk(Path.join(root, d));
        });
    });
};
|||
|
|||
// Iterate over every blob beneath `root`, calling `handler(err, info, next)`
// with { blobId, atime, ctime, mtime } for each file found. The handler must
// call next() to continue; cb() fires once the walk completes.
var listBlobs = function (root, handler, cb) {
    Fs.readdir(root, function (err, dir) {
        if (err) { return void cb(err); }

        var describe = function (err, path, next) {
            Fs.stat(path, function (err, stats) {
                if (err) { return void handler(err, void 0, next); }
                handler(void 0, {
                    blobId: Path.basename(path),
                    atime: stats.atime,
                    ctime: stats.ctime,
                    mtime: stats.mtime,
                }, next);
            });
        };

        var walk = makeWalker(20, describe, function () {
            cb();
        });

        dir.forEach(function (d) {
            // blobs live under two-character prefix directories only
            if (d.length === 2) { walk(Path.join(root, d)); }
        });
    });
};
|||
|
|||
/*  Instantiate a blob store.

    `config`:
        getSession (function, required): returns the per-user session object
            used to track upload state
        blobPath: root directory for completed uploads (default './blob')
        blobStagingPath: directory for in-progress uploads (default './blobstage')
        archivePath: root under which archived blobs/proofs live
            (default './data/archive')

    Calls back with (error) or (undefined, methods) once the required
    directories exist. Every public method validates its inputs before
    delegating to the corresponding internal function.

    Fixes relative to the previous version:
    - `upload` no longer wraps its callback with Util.once/Util.mkAsync a
      second time (it was already wrapped just above)
    - `isOwnedBy` now validates blobId, consistent with every other method
*/
BlobStore.create = function (config, _cb) {
    var cb = Util.once(Util.mkAsync(_cb));
    if (typeof(config.getSession) !== 'function') {
        return void cb("getSession method required");
    }

    var Env = {
        blobPath: config.blobPath || './blob',
        blobStagingPath: config.blobStagingPath || './blobstage',
        archivePath: config.archivePath || './data/archive',
        getSession: config.getSession,
    };

    nThen(function (w) {
        // ensure the blob, staging, and archive directories exist before
        // exposing any methods; report the first failure exactly once
        var CB = Util.both(w.abort, cb);
        Fse.mkdirp(Env.blobPath, w(function (e) {
            if (e) { CB(e); }
        }));
        Fse.mkdirp(Env.blobStagingPath, w(function (e) {
            if (e) { CB(e); }
        }));

        Fse.mkdirp(Path.join(Env.archivePath, Env.blobPath), w(function (e) {
            if (e) { CB(e); }
        }));
    }).nThen(function () {
        var methods = {
            isFileId: isValidId,
            // is there an upload in progress for this user?
            status: function (safeKey, _cb) {
                // TODO check if the final destination is a file
                // because otherwise two people can try to upload to the same location
                // and one will fail, invalidating their hard work
                var cb = Util.once(Util.mkAsync(_cb));
                if (!isValidSafeKey(safeKey)) { return void cb('INVALID_SAFEKEY'); }
                isFile(makeStagePath(Env, safeKey), cb);
            },
            // append a base64 chunk to the user's staging file
            upload: function (safeKey, content, _cb) {
                var cb = Util.once(Util.mkAsync(_cb));
                if (!isValidSafeKey(safeKey)) { return void cb('INVALID_SAFEKEY'); }
                // `cb` is already once/async-wrapped above; the previous
                // version redundantly wrapped it a second time
                upload(Env, safeKey, content, cb);
            },

            // abort an upload, resetting the session's accounting
            cancel: function (safeKey, fileSize, _cb) {
                var cb = Util.once(Util.mkAsync(_cb));
                if (!isValidSafeKey(safeKey)) { return void cb('INVALID_SAFEKEY'); }
                if (typeof(fileSize) !== 'number' || isNaN(fileSize) || fileSize <= 0) { return void cb("INVALID_FILESIZE"); }
                upload_cancel(Env, safeKey, fileSize, cb);
            },

            // does this user hold an ownership proof for this blob?
            isOwnedBy: function (safeKey, blobId, _cb) {
                var cb = Util.once(Util.mkAsync(_cb));
                if (!isValidSafeKey(safeKey)) { return void cb('INVALID_SAFEKEY'); }
                if (!isValidId(blobId)) { return void cb("INVALID_ID"); }
                isOwnedBy(Env, safeKey, blobId, cb);
            },

            remove: {
                // permanently delete a live blob
                blob: function (blobId, _cb) {
                    var cb = Util.once(Util.mkAsync(_cb));
                    if (!isValidId(blobId)) { return void cb("INVALID_ID"); }
                    remove(Env, blobId, cb);
                },
                // permanently delete a live ownership proof
                proof: function (safeKey, blobId, _cb) {
                    var cb = Util.once(Util.mkAsync(_cb));
                    if (!isValidSafeKey(safeKey)) { return void cb('INVALID_SAFEKEY'); }
                    if (!isValidId(blobId)) { return void cb("INVALID_ID"); }
                    removeProof(Env, safeKey, blobId, cb);
                },
                archived: {
                    // permanently delete an archived blob
                    blob: function (blobId, _cb) {
                        var cb = Util.once(Util.mkAsync(_cb));
                        if (!isValidId(blobId)) { return void cb("INVALID_ID"); }
                        removeArchivedBlob(Env, blobId, cb);
                    },
                    // permanently delete an archived ownership proof
                    proof: function (safeKey, blobId, _cb) {
                        var cb = Util.once(Util.mkAsync(_cb));
                        if (!isValidSafeKey(safeKey)) { return void cb('INVALID_SAFEKEY'); }
                        if (!isValidId(blobId)) { return void cb("INVALID_ID"); }
                        removeArchivedProof(Env, safeKey, blobId, cb);
                    },
                },
            },

            archive: {
                // move a live blob into the archive
                blob: function (blobId, _cb) {
                    var cb = Util.once(Util.mkAsync(_cb));
                    if (!isValidId(blobId)) { return void cb("INVALID_ID"); }
                    archiveBlob(Env, blobId, cb);
                },
                // move a live ownership proof into the archive
                proof: function (safeKey, blobId, _cb) {
                    var cb = Util.once(Util.mkAsync(_cb));
                    if (!isValidSafeKey(safeKey)) { return void cb('INVALID_SAFEKEY'); }
                    if (!isValidId(blobId)) { return void cb("INVALID_ID"); }
                    archiveProof(Env, safeKey, blobId, cb);
                },
            },

            restore: {
                // move an archived blob back into the live tree
                blob: function (blobId, _cb) {
                    var cb = Util.once(Util.mkAsync(_cb));
                    if (!isValidId(blobId)) { return void cb("INVALID_ID"); }
                    restoreBlob(Env, blobId, cb);
                },
                // move an archived ownership proof back into the live tree
                proof: function (safeKey, blobId, _cb) {
                    var cb = Util.once(Util.mkAsync(_cb));
                    if (!isValidSafeKey(safeKey)) { return void cb('INVALID_SAFEKEY'); }
                    if (!isValidId(blobId)) { return void cb("INVALID_ID"); }
                    restoreProof(Env, safeKey, blobId, cb);
                },
            },

            // finalize an unowned upload
            complete: function (safeKey, id, _cb) {
                var cb = Util.once(Util.mkAsync(_cb));
                if (!isValidSafeKey(safeKey)) { return void cb('INVALID_SAFEKEY'); }
                if (!isValidId(id)) { return void cb("INVALID_ID"); }
                upload_complete(Env, safeKey, id, cb);
            },
            // finalize an owned upload (also writes an ownership proof)
            completeOwned: function (safeKey, id, _cb) {
                var cb = Util.once(Util.mkAsync(_cb));
                if (!isValidSafeKey(safeKey)) { return void cb('INVALID_SAFEKEY'); }
                if (!isValidId(id)) { return void cb("INVALID_ID"); }
                owned_upload_complete(Env, safeKey, id, cb);
            },
            // size in bytes of a stored blob (0 if it was deleted)
            size: function (id, _cb) {
                var cb = Util.once(Util.mkAsync(_cb));
                if (!isValidId(id)) { return void cb("INVALID_ID"); }
                getUploadSize(Env, id, cb);
            },

            list: {
                // iterate over live blobs
                blobs: function (handler, _cb) {
                    var cb = Util.once(Util.mkAsync(_cb));
                    listBlobs(Env.blobPath, handler, cb);
                },
                // iterate over live ownership proofs
                proofs: function (handler, _cb) {
                    var cb = Util.once(Util.mkAsync(_cb));
                    listProofs(Env.blobPath, handler, cb);
                },
                archived: {
                    // iterate over archived ownership proofs
                    proofs: function (handler, _cb) {
                        var cb = Util.once(Util.mkAsync(_cb));
                        listProofs(prependArchive(Env, Env.blobPath), handler, cb);
                    },
                    // iterate over archived blobs
                    blobs: function (handler, _cb) {
                        var cb = Util.once(Util.mkAsync(_cb));
                        listBlobs(prependArchive(Env, Env.blobPath), handler, cb);
                    },
                }
            },
        };

        cb(void 0, methods);
    });
};
|||
|
|||
1260
lib/storage/file.js
File diff suppressed because it is too large
View File
File diff suppressed because it is too large
View File
@ -0,0 +1,397 @@ |
|||
var Fs = require("fs"); |
|||
var Fse = require("fs-extra"); |
|||
var Path = require("path"); |
|||
var nacl = require("tweetnacl/nacl-fast"); |
|||
var nThen = require("nthen"); |
|||
|
|||
var Tasks = module.exports; |
|||
|
|||
// Parse a JSON string, returning null (rather than throwing) on bad input.
var tryParse = function (s) {
    var parsed = null;
    try {
        parsed = JSON.parse(s);
    } catch (e) {}
    return parsed;
};
|||
|
|||
// Serialize a task as [time, command, ...args].
// Returns null if `time` is not a number or `command` is not a string.
// Non-array `args` are ignored, producing just [time, command].
var encode = function (time, command, args) {
    var valid = typeof(time) === 'number' && typeof(command) === 'string';
    if (!valid) { return null; }
    var task = [time, command];
    return Array.isArray(args) ? task.concat(args) : task;
};
|||
|
|||
/* |
|||
var randomId = function () { |
|||
var bytes = Array.prototype.slice.call(nacl.randomBytes(16)); |
|||
return bytes.map(function (b) { |
|||
var n = Number(b & 0xff).toString(16); |
|||
return n.length === 1? '0' + n: n; |
|||
}).join(''); |
|||
}; |
|||
|
|||
|
|||
var mkPath = function (env, id) { |
|||
return Path.join(env.root, id.slice(0, 2), id) + '.ndjson'; |
|||
}; |
|||
*/ |
|||
|
|||
// tasks are grouped into one directory per MODULUS milliseconds (one day)
var MODULUS = 24 * 60 * 60 * 1000;
// round a timestamp down to the start of its MODULUS-sized window
var moduloTime = function (d) {
    var remainder = d % MODULUS;
    return d - remainder;
};

// directory names are the stringified start of the window a task falls in
var makeDirectoryId = function (d) {
    return String(moduloTime(d));
};
|||
|
|||
// Persist a task to disk as a single line of JSON.
// The file lives in a directory named for the time window in which the task
// is due, and is named by the hash of its contents, so writing the exact
// same task twice harmlessly overwrites the first copy.
var write = function (env, task, cb) {
    var str = JSON.stringify(task) + '\n';

    // content-addressed id: hash of the serialized task, made path-safe
    var hashed = nacl.hash(nacl.util.decodeUTF8(str));
    var id = nacl.util.encodeBase64(hashed).replace(/\//g, '-');

    var dir = Path.join(env.root, makeDirectoryId(task[0]));

    nThen(function (w) {
        // ensure the window's directory exists
        Fse.mkdirp(dir, 0x1ff, w(function (err) {
            if (err) {
                w.abort();
                return void cb(err);
            }
        }));
    }).nThen(function () {
        var fullPath = Path.join(dir, id + '.ndjson');
        Fs.writeFile(fullPath, str, function (e) {
            if (e) {
                env.log.error("TASK_WRITE_FAILURE", {
                    error: e,
                    path: fullPath,
                });
                return void cb(e);
            }
            env.log.info("SUCCESSFUL_WRITE", {
                path: fullPath,
            });
            cb();
        });
    });
};
|||
|
|||
// Delete a single task file. FIXME COLDSTORAGE?
var remove = function (env, path, cb) {
    Fs.unlink(path, cb);
};

// Delete an (expected-to-be-empty) task window directory.
var removeDirectory = function (env, path, cb) {
    Fs.rmdir(path, cb);
};
|||
|
|||
// Collect the paths of every stored task file and call back with the array.
// Window directories wholly in the future are skipped, and empty window
// directories are opportunistically removed. With `migration` truthy, only
// legacy two-character-prefix directories are listed.
var list = Tasks.list = function (env, cb, migration) {
    var rootDirs;

    nThen(function (w) {
        // read the root directory
        Fs.readdir(env.root, w(function (e, list) {
            if (e) {
                env.log.error("TASK_ROOT_DIR", {
                    root: env.root,
                    error: e,
                });
                w.abort();
                return void cb(e);
            }
            if (list.length === 0) {
                // nothing stored at all: short-circuit with an empty list
                w.abort();
                return void cb(void 0, []);
            }
            rootDirs = list;
        }));
    }).nThen(function () {
        // schedule the nested directories for exploration
        // return a list of paths to tasks
        var queue = nThen(function () {});

        var allPaths = [];

        // any window starting at or after this cannot contain due tasks
        var currentWindow = moduloTime(+new Date() + MODULUS);

        // We prioritize a small footprint over speed, so we
        // iterate over directories in serial rather than parallel
        rootDirs.forEach(function (dir) {
            // if a directory is two characters, it's the old format
            // otherwise, it indicates when the file is set to expire
            // so we can ignore directories which are clearly in the future

            var dirTime;
            if (migration) {
                // this block handles migrations. ignore new formats
                if (dir.length !== 2) {
                    return;
                }
            } else {
                // not in migration mode, check if it's a new format
                if (dir.length >= 2) {
                    // might be the new format.
                    // check its time to see if it should be skipped
                    dirTime = parseInt(dir);
                    if (!isNaN(dirTime) && dirTime >= currentWindow) {
                        return;
                    }
                }
            }

            queue.nThen(function (w) {
                var subPath = Path.join(env.root, dir);
                Fs.readdir(subPath, w(function (e, paths) {
                    if (e) {
                        env.log.error("TASKS_INVALID_SUBDIR", {
                            path: subPath,
                            error: e,
                        });
                        return;
                    }

                    if (paths.length === 0) {
                        // best-effort cleanup of empty window directories
                        removeDirectory(env, subPath, function (err) {
                            if (err) {
                                env.log.error('TASKS_REMOVE_EMPTY_DIRECTORY', {
                                    error: err,
                                    path: subPath,
                                });
                            }
                        });
                    }

                    // concat in place
                    Array.prototype.push.apply(allPaths, paths.map(function (p) {
                        return Path.join(subPath, p);
                    }));
                }));
            });
        });

        queue.nThen(function () {
            cb(void 0, allPaths);
        });
    });
};
|||
|
|||
// Read and parse a single task file.
// Calls back with (error) if the file cannot be read or does not contain a
// well-formed task ([time, command, ...args]), otherwise (undefined, task).
var read = function (env, filePath, cb) {
    Fs.readFile(filePath, 'utf8', function (e, str) {
        if (e) { return void cb(e); }

        var task = tryParse(str);
        if (!Array.isArray(task) || task.length < 2) {
            // BUGFIX: this previously called env.log(...) directly, but
            // env.log is a logger object (used as env.log.error elsewhere
            // in this file), so an invalid task would throw a TypeError
            env.log.error("INVALID_TASK", {
                path: filePath,
                task: task,
            });
            return cb(new Error('INVALID_TASK'));
        }
        cb(void 0, task);
    });
};
|||
|
|||
// Execute an EXPIRE task: archive the channel named by the task's first
// argument. Archival failures are logged but do not block completion;
// cb is always called.
// TODO magic indices; task parsing could be factored into a function,
// or tasks could be encoded in a richer format to begin with
var expire = function (env, task, cb) {
    var Log = env.log;
    var channel = task[2];

    Log.info('ARCHIVAL_SCHEDULED_EXPIRATION', {
        task: task,
    });
    env.store.archiveChannel(channel, function (err) {
        if (err) {
            Log.error('ARCHIVE_SCHEDULED_EXPIRATION_ERROR', {
                task: task,
                error: err,
            });
        }
        cb();
    });
};
|||
|
|||
// Load the task at `path` and execute it if it is due.
// Tasks which are not yet due are left on disk untouched; executed (or
// unrecognized) tasks have their files removed afterwards. Removal
// failures are logged for the admin to clean up.
var run = Tasks.run = function (env, path, cb) {
    var now = +new Date();
    var Log = env.log;
    var task;
    var command;

    nThen(function (w) {
        read(env, path, w(function (err, _task) {
            if (err) {
                // there was a file but it wasn't a valid task
                w.abort();
                return void cb(err);
            }
            task = _task;

            // not due yet: leave it for a later sweep
            if (task[0] > now) {
                w.abort();
                return cb();
            }

            command = task[1];
        }));
    }).nThen(function (w) {
        if (command === 'EXPIRE') {
            return void expire(env, task, w());
        }
        Log.warn("TASKS_UNKNOWN_COMMAND", task);
    }).nThen(function () {
        // the task has been handled; remove its file
        remove(env, path, function (err) {
            if (err) {
                Log.error('TASKS_RECORD_REMOVAL', {
                    path: path,
                    err: err,
                });
            }
            cb();
        });
    });
};
|||
|
|||
// Run every stored task that is due. Guarded by `env.running` so only one
// sweep happens at a time; tasks are executed serially to keep the
// server's memory and file-descriptor footprint small.
var runAll = function (env, cb) {
    // check if already running and bail out if so
    if (env.running) {
        return void cb("TASK_CONCURRENCY");
    }

    // if not, set a flag to block concurrency and proceed
    env.running = true;

    var paths;
    nThen(function (w) {
        list(env, w(function (err, _paths) {
            if (err) {
                w.abort();
                env.running = false;
                return void cb(err);
            }
            paths = _paths;
        }));
    }).nThen(function (w) {
        // chain the runs serially; `done` releases the outer barrier
        var done = w();
        var nt = nThen(function () {});
        paths.forEach(function (path) {
            nt = nt.nThen(function (w) {
                run(env, path, w(function (err) {
                    if (err) {
                        // Any errors are already logged in 'run'
                        // the admin will need to review the logs and clean up
                    }
                }));
            });
        });
        nt = nt.nThen(function () {
            done();
        });
    }).nThen(function (/*w*/) {
        // always clear the concurrency flag, even after failures above
        env.running = false;
        cb();
    });
};
|||
|
|||
// Migrate every task from the legacy two-character-prefix layout to the
// time-windowed directory layout: read each legacy task, rewrite it in the
// new format, then delete the original file. Tasks are processed serially;
// a failure at any step sets `bypass` so the remaining steps are skipped
// for that task — nothing is deleted unless it was rewritten successfully.
var migrate = function (env, cb) {
    // list every task
    list(env, function (err, paths) {
        if (err) {
            return void cb(err);
        }
        var nt = nThen(function () {});
        paths.forEach(function (path) {
            var bypass;
            var task;

            nt = nt.nThen(function (w) {
                // read
                read(env, path, w(function (err, _task) {
                    if (err) {
                        bypass = true;
                        env.log.error("TASK_MIGRATION_READ", {
                            error: err,
                            path: path,
                        });
                        return;
                    }
                    task = _task;
                }));
            }).nThen(function (w) {
                if (bypass) { return; }
                // rewrite in new format
                write(env, task, w(function (err) {
                    if (err) {
                        bypass = true;
                        env.log.error("TASK_MIGRATION_WRITE", {
                            error: err,
                            task: task,
                        });
                    }
                }));
            }).nThen(function (w) {
                if (bypass) { return; }
                // remove
                remove(env, path, w(function (err) {
                    if (err) {
                        env.log.error("TASK_MIGRATION_REMOVE", {
                            error: err,
                            path: path,
                        });
                    }
                }));
            });
        });
        nt = nt.nThen(function () {
            cb();
        });
    }, true); // `true` => migration mode: list only legacy directories
};
|||
|
|||
/*  Create a task scheduler rooted at config.taskPath.

    `config`:
        store (required): storage API providing archiveChannel (used by EXPIRE)
        log (required): logger object with info/warn/error methods
        taskPath: root directory for task files (default './tasks')

    Calls back with (error) or (undefined, methods).

    Fixes relative to the previous version:
    - the exposed `list(olderThan, cb)` forwarded `olderThan` into the
      internal list(env, cb, migration) callback slot, dropping the real
      callback and accidentally enabling migration mode
    - `write` now rejects malformed input instead of letting the internal
      write() throw on a null task
*/
Tasks.create = function (config, cb) {
    if (!config.store) { throw new Error("E_STORE_REQUIRED"); }
    if (!config.log) { throw new Error("E_LOG_REQUIRED"); }

    var env = {
        root: config.taskPath || './tasks',
        log: config.log,
        store: config.store,
    };

    // make sure the root path exists...
    Fse.mkdirp(env.root, 0x1ff, function (err) {
        if (err) { return void cb(err); }
        cb(void 0, {
            write: function (time, command, args, cb) {
                var task = encode(time, command, args);
                // encode() returns null for malformed input; fail fast
                // rather than letting write() throw on task[0]
                if (!task) { return void cb('INVALID_TASK'); }
                write(env, task, cb);
            },
            list: function (olderThan, cb) {
                // `olderThan` was never supported by the internal list();
                // accept the historical two-argument call shape, but also
                // allow list(cb) directly
                if (typeof(cb) !== 'function' && typeof(olderThan) === 'function') {
                    cb = olderThan;
                }
                list(env, cb);
            },
            remove: function (id, cb) {
                remove(env, id, cb);
            },
            run: function (id, cb) {
                run(env, id, cb);
            },
            runAll: function (cb) {
                runAll(env, cb);
            },
            migrate: function (cb) {
                migrate(env, cb);
            },
        });
    });
};
|||
|
|||
@ -0,0 +1,84 @@ |
|||
/* jshint esversion: 6 */ |
|||
/* global Buffer */ |
|||
|
|||
const ToPull = require('stream-to-pull-stream'); |
|||
const Pull = require('pull-stream'); |
|||
|
|||
const Stream = module.exports; |
|||
|
|||
// transform a stream of arbitrarily divided data
|
|||
// into a stream of buffers divided by newlines in the source stream
|
|||
// TODO see if we could improve performance by using libnewline
|
|||
const NEWLINE_CHR = ('\n').charCodeAt(0);
// Pull-stream transform: re-chunk an arbitrarily divided byte stream into
// one buffer per newline-terminated line. Partial lines are carried in
// `remainder` across reads and flushed when the stream ends.
const mkBufferSplit = () => {
    // bytes seen since the last newline, carried between reads
    let remainder = null;
    return Pull((read) => {
        return (abort, cb) => {
            read(abort, function (end, data) {
                if (end) {
                    if (data) { console.log("mkBufferSplit() Data at the end"); }
                    // flush whatever is buffered, prepending any remainder
                    cb(end, remainder ? [remainder, data] : [data]);
                    remainder = null;
                    return;
                }
                const queue = [];
                for (;;) {
                    const offset = data.indexOf(NEWLINE_CHR);
                    if (offset < 0) {
                        // no newline left in this chunk; buffer it for later
                        remainder = remainder ? Buffer.concat([remainder, data]) : data;
                        break;
                    }
                    let subArray = data.slice(0, offset);
                    if (remainder) {
                        // complete the line started in a previous chunk
                        subArray = Buffer.concat([remainder, subArray]);
                        remainder = null;
                    }
                    queue.push(subArray);
                    data = data.slice(offset + 1);
                }
                cb(end, queue);
            });
        };
    }, Pull.flatten()); // flatten the per-read arrays into a line stream
};
|||
|
|||
// Pull-stream mapper which wraps each line buffer in an object carrying
// its byte offset from the start of the underlying stream. The offset
// accounts for the newline removed by mkBufferSplit.
const mkOffsetCounter = () => {
    let position = 0;
    return Pull.map((buff) => {
        const item = {
            offset: position,
            buff: buff,
        };
        // +1 for the eaten newline
        position += buff.length + 1;
        return item;
    });
};
|||
|
|||
// readMessagesBin asynchronously iterates over the messages in a channel log
// the handler for each message must call back to read more, which should mean
// that this function has a lower memory profile than our classic method
// of reading logs line by line.
// it also allows the handler to abort reading at any time
//
// msgHandler is called as msgHandler(data, readMore, abort) where
//   data     is { buff, offset } for one newline-delimited record
//   readMore continues reading the next record
//   abort    closes the underlying stream and stops iteration
// cb is called once with an error only if reading failed before an abort.
Stream.readFileBin = (stream, msgHandler, cb) => {
    //const stream = Fs.createReadStream(path, { start: start });
    let keepReading = true;
    Pull(
        ToPull.read(stream),
        mkBufferSplit(),
        mkOffsetCounter(),
        Pull.asyncMap((data, moreCb) => {
            // the third argument lets the handler abort early:
            // close the stream, remember that we stopped on purpose,
            // and release the asyncMap so the pipeline can drain
            msgHandler(data, moreCb, () => {
                try {
                    stream.close();
                } catch (err) {
                    console.error("READ_FILE_BIN_ERR", err);
                }
                keepReading = false;
                moreCb();
            });
        }),
        // keep draining while keepReading is true; once aborted, any
        // error produced by closing the stream is deliberately ignored
        Pull.drain(() => (keepReading), (err) => {
            cb((keepReading) ? err : undefined);
        })
    );
};
|||
@ -0,0 +1,576 @@ |
|||
/* jshint esversion: 6 */ |
|||
/* global process */ |
|||
|
|||
const HK = require("../hk-util"); |
|||
const Store = require("../storage/file"); |
|||
const BlobStore = require("../storage/blob"); |
|||
const Util = require("../common-util"); |
|||
const nThen = require("nthen"); |
|||
const Meta = require("../metadata"); |
|||
const Pins = require("../pins"); |
|||
const Core = require("../commands/core"); |
|||
const Saferphore = require("saferphore"); |
|||
const Logger = require("../log"); |
|||
const Tasks = require("../storage/tasks"); |
|||
const Nacl = require('tweetnacl/nacl-fast'); |
|||
|
|||
// Worker-local environment. Env.Log mirrors the main process's logger API.
const Env = {
    Log: {},
};

// support the usual log API but pass it to the main process,
// which owns the actual log sink
Logger.levels.forEach(function (level) {
    Env.Log[level] = function (label, info) {
        var payload = {
            log: level,
            label: label,
            info: info,
        };
        process.send(payload);
    };
});
|||
|
|||
var ready = false;
var store;
var pinStore;
var blobStore;

// Initialize this worker's storage backends (channel store, pin store,
// blob store) in parallel, then the expiration task runner.
// Calls back exactly once: with an error string/object on failure,
// or with no arguments on success.
const init = function (config, _cb) {
    const cb = Util.once(Util.mkAsync(_cb));
    if (!config) { return void cb('E_INVALID_CONFIG'); }

    nThen(function (w) {
        // abort the chain and report the first backend failure
        var fail = function (err) {
            w.abort();
            cb(err);
        };
        Store.create(config, w(function (err, _store) {
            if (err) { return void fail(err); }
            store = _store;
        }));
        Store.create({
            filePath: config.pinPath,
        }, w(function (err, _pinStore) {
            if (err) { return void fail(err); }
            pinStore = _pinStore;
        }));
        BlobStore.create({
            blobPath: config.blobPath,
            blobStagingPath: config.blobStagingPath,
            archivePath: config.archivePath,
            getSession: function () {},
        }, w(function (err, blob) {
            if (err) { return void fail(err); }
            blobStore = blob;
        }));
    }).nThen(function (w) {
        // the task runner needs the channel store, so it comes second
        Tasks.create({
            log: Env.Log,
            taskPath: config.taskPath,
            store: store,
        }, w(function (err, tasks) {
            if (err) {
                w.abort();
                return void cb(err);
            }
            Env.tasks = tasks;
        }));
    }).nThen(function () {
        cb();
    });
};
|||
|
|||
/* computeIndex
    can call back with an error or a computed index which includes:
        * cpIndex:
            * array including any checkpoints pushed within the last 100 messages
            * processed by 'sliceCpIndex(cpIndex, line)'
        * offsetByHash:
            * a map containing message offsets by their hash
            * this is for every message in history, so it could be very large...
                * except we remove offsets from the map if they occur before the oldest relevant checkpoint
        * size: in bytes
        * metadata:
            * validationKey
            * expiration time
            * owners
            * ??? (anything else we might add in the future)
        * line
            * the number of messages in history
            * including the initial metadata line, if it exists

*/
const computeIndex = function (data, cb) {
    if (!data || !data.channel) {
        return void cb('E_NO_CHANNEL');
    }

    const channelName = data.channel;

    // offsets of checkpoint messages, in log order
    const cpIndex = [];
    // buffer of messages seen since the most recent checkpoint
    let messageBuf = [];
    // running message counter (doubles as the final line count)
    let i = 0;

    const CB = Util.once(cb);

    const offsetByHash = {};
    let offsetCount = 0;
    let size = 0;
    nThen(function (w) {
        // iterate over all messages in the channel log
        // old channels can contain metadata as the first message of the log
        // skip over metadata as that is handled elsewhere
        // otherwise index important messages in the log
        store.readMessagesBin(channelName, 0, (msgObj, readMore) => {
            let msg;
            // keep an eye out for the metadata line if you haven't already seen it
            // but only check for metadata on the first line
            if (!i && msgObj.buff.indexOf('{') === 0) {
                i++; // always increment the message counter
                msg = HK.tryParse(Env, msgObj.buff.toString('utf8'));
                if (typeof msg === "undefined") { return readMore(); }

                // validate that the current line really is metadata before storing it as such
                // skip this, as you already have metadata...
                if (HK.isMetadataMessage(msg)) { return readMore(); }
            }
            i++;
            // cheap substring test first; only parse if 'cp|' appears at all
            if (msgObj.buff.indexOf('cp|') > -1) {
                msg = msg || HK.tryParse(Env, msgObj.buff.toString('utf8'));
                if (typeof msg === "undefined") { return readMore(); }
                // cache the offsets of checkpoints if they can be parsed
                if (msg[2] === 'MSG' && msg[4].indexOf('cp|') === 0) {
                    cpIndex.push({
                        offset: msgObj.offset,
                        line: i
                    });
                    // we only want to store messages since the latest checkpoint
                    // so clear the buffer every time you see a new one
                    messageBuf = [];
                }
            } else if (messageBuf.length > 100 && cpIndex.length === 0) {
                // cap memory for checkpoint-less channels:
                // take the last 50 messages
                messageBuf = messageBuf.slice(-50);
            }
            // if it's not metadata or a checkpoint then it should be a regular message
            // store it in the buffer
            // (note: checkpoint messages themselves are also pushed here,
            // right after the buffer is cleared)
            messageBuf.push(msgObj);
            return readMore();
        }, w((err) => {
            if (err && err.code !== 'ENOENT') {
                // ENOENT (no log yet) is tolerated; anything else aborts
                w.abort();
                return void CB(err);
            }

            // once indexing is complete you should have a buffer of messages since the latest checkpoint
            // or the 50-100 latest messages if the channel is of a type without checkpoints.
            // map the 'hash' of each message to its byte offset in the log, to be used for reconnecting clients
            messageBuf.forEach((msgObj) => {
                const msg = HK.tryParse(Env, msgObj.buff.toString('utf8'));
                if (typeof msg === "undefined") { return; }
                if (msg[0] === 0 && msg[2] === 'MSG' && typeof(msg[4]) === 'string') {
                    // msgObj.offset is API guaranteed by our storage module
                    // it should always be a valid positive integer
                    offsetByHash[HK.getHash(msg[4])] = msgObj.offset;
                    offsetCount++;
                }
                // There is a trailing \n at the end of the file
                size = msgObj.offset + msgObj.buff.length + 1;
            });
        }));
    }).nThen(function () {
        // return the computed index
        CB(null, {
            // Only keep the checkpoints included in the last 100 messages
            cpIndex: HK.sliceCpIndex(cpIndex, i),
            offsetByHash: offsetByHash,
            offsets: offsetCount,
            size: size,
            //metadata: metadata,
            line: i
        });
    });
};
|||
|
|||
// Rebuild a channel's current metadata by replaying its metadata log
// through a line handler. Calls back with (err) on a read failure,
// otherwise with (undefined, metadata).
const computeMetadata = function (data, cb) {
    const ref = {};
    const onLine = Meta.createLineHandler(ref, Env.Log.error);
    return void store.readChannelMetadata(data.channel, onLine, function (err) {
        // stream errors?
        if (err) { return void cb(err); }
        cb(void 0, ref.meta);
    });
};
|||
|
|||
/* getOlderHistory
    * allows clients to query for all messages until a known hash is read
    * stores all messages in history as they are read
      * can therefore be very expensive for memory
      * should probably be converted to a streaming interface

    Used by:
    * GET_HISTORY_RANGE
*/

const getOlderHistory = function (data, cb) {
    const oldestKnownHash = data.hash;
    const channelName = data.channel;
    const desiredMessages = data.desiredMessages;
    const desiredCheckpoint = data.desiredCheckpoint;

    // accumulates every parsed message from the start of the log up to and
    // including the one matching oldestKnownHash (memory-heavy, see above)
    var messages = [];
    var found = false;
    store.getMessages(channelName, function (msgStr) {
        // once the target hash has been seen, ignore the rest of the log
        if (found) { return; }

        let parsed = HK.tryParse(Env, msgStr);
        if (typeof parsed === "undefined") { return; }

        // identify classic metadata messages by their inclusion of a channel.
        // and don't send metadata, since:
        // 1. the user won't be interested in it
        // 2. this metadata is potentially incomplete/incorrect
        if (HK.isMetadataMessage(parsed)) { return; }

        var content = parsed[4];
        if (typeof(content) !== 'string') { return; }

        var hash = HK.getHash(content);
        if (hash === oldestKnownHash) {
            found = true;
        }
        // note: the matching message itself is still included
        messages.push(parsed);
    }, function (err) {
        var toSend = [];
        if (typeof (desiredMessages) === "number") {
            // caller asked for a fixed number of messages preceding the hash
            toSend = messages.slice(-desiredMessages);
        } else {
            // otherwise walk backwards until we've passed desiredCheckpoint
            // checkpoints (the newest message is not counted as one)
            // NOTE(review): if desiredCheckpoint is undefined, the
            // `cpCount >= desiredCheckpoint` comparison is always false and
            // the whole collected history is sent — confirm this is intended
            let cpCount = 0;
            for (var i = messages.length - 1; i >= 0; i--) {
                if (/^cp\|/.test(messages[i][4]) && i !== (messages.length - 1)) {
                    cpCount++;
                }
                toSend.unshift(messages[i]);
                if (cpCount >= desiredCheckpoint) { break; }
            }
        }
        cb(err, toSend);
    });
};
|||
|
|||
// Rebuild a user's pin state by replaying their pin log from disk.
// Calls back with (undefined, pins); the underlying read reports no
// error to this callback (FIXME upstream).
const getPinState = function (data, cb) {
    const safeKey = data.key;
    const state = {};
    const onLine = Pins.createLineHandler(state, Env.Log.error);

    // if channels aren't in memory. load them from disk
    // TODO replace with readMessagesBin
    pinStore.getMessages(safeKey, onLine, function () {
        cb(void 0, state.pins); // FIXME no error handling?
    });
};
|||
|
|||
// Report the size in bytes of a channel log (32-character ids) or of an
// uploaded blob (any other valid id). A channel with no log on disk
// counts as size 0; invalid ids fail with 'INVALID_CHAN'.
const _getFileSize = function (channel, _cb) {
    var cb = Util.once(Util.mkAsync(_cb));
    if (!Core.isValidId(channel)) { return void cb('INVALID_CHAN'); }

    if (channel.length !== 32) {
        // 'channel' refers to a file, so you need another API
        return void blobStore.size(channel, function (e, size) {
            if (typeof(size) === 'undefined') { return void cb(e); }
            cb(void 0, size);
        });
    }

    store.getChannelSize(channel, function (e, size) {
        if (e) {
            // a missing log simply means the channel has no data yet
            if (e.code === 'ENOENT') { return void cb(void 0, 0); }
            return void cb(e.code);
        }
        cb(void 0, size);
    });
};
|||
|
|||
// RPC entry point: unwrap the { channel } payload and delegate.
const getFileSize = function (data, cb) {
    return void _getFileSize(data.channel, cb);
};
|||
|
|||
// Apply `handler(channel, next)` to every channel in the list, with at
// most 10 in flight at once (via a semaphore). Calls `cb` once when all
// handlers have called their `next`. Fails fast only on a non-array input.
const _iterateFiles = function (channels, handler, cb) {
    if (!Array.isArray(channels)) { return cb('INVALID_LIST'); }
    var L = channels.length;
    // limit concurrency to 10 simultaneous handlers
    var sem = Saferphore.create(10);

    // (channel, next) => { ??? }
    var job = function (channel, wait) {
        return function (give) {
            // give() yields a callback that frees this semaphore slot;
            // wrapping it with the nThen waiter `wait` also ties this job's
            // completion into the barrier below
            // NOTE(review): relies on saferphore's give() returning a
            // release-then-forward wrapper — confirm against its API
            handler(channel, wait(give()));
        };
    };

    nThen(function (w) {
        for (var i = 0; i < L; i++) {
            sem.take(job(channels[i], w));
        }
    }).nThen(function () {
        cb();
    });
};
|||
|
|||
// Sum the sizes of all listed channels. Channels whose size cannot be
// read contribute nothing rather than failing the whole total.
const getTotalSize = function (data, cb) {
    var total = 0;
    _iterateFiles(data.channels, function (channel, next) {
        _getFileSize(channel, function (err, size) {
            if (!err) { total += size; }
            next();
        });
    }, function (err) {
        if (err) { return cb(err); }
        cb(void 0, total);
    });
};
|||
|
|||
// Report which of the listed channels have zero size, which indicates a
// pad that has been deleted. Channels whose size cannot be read are
// skipped silently.
const getDeletedPads = function (data, cb) {
    var absentees = [];
    _iterateFiles(data.channels, function (channel, next) {
        _getFileSize(channel, function (err, size) {
            if (!err && size === 0) { absentees.push(channel); }
            next();
        });
    }, function (err) {
        if (err) { return void cb(err); }
        cb(void 0, absentees);
    });
};
|||
|
|||
// Measure each listed channel individually, producing a map from
// channel id to its size (0 when the size cannot be determined).
const getMultipleFileSize = function (data, cb) {
    const counts = {};
    _iterateFiles(data.channels, function (channel, next) {
        _getFileSize(channel, function (err, size) {
            counts[channel] = err ? 0 : size;
            next();
        });
    }, function (err) {
        if (err) { return void cb(err); }
        cb(void 0, counts);
    });
};
|||
|
|||
// Scan a channel's log for the message whose hash matches `data.hash`
// and call back with its byte offset, or -1 if no message matches.
const getHashOffset = function (data, cb) {
    const channelName = data.channel;
    const lastKnownHash = data.hash;
    if (typeof(lastKnownHash) !== 'string') { return void cb("INVALID_HASH"); }

    var offset = -1;
    store.readMessagesBin(channelName, 0, (msgObj, readMore, abort) => {
        // tryParse returns a parsed message or undefined;
        // unparseable lines are skipped
        const msg = HK.tryParse(Env, msgObj.buff.toString('utf8'));
        if (typeof(msg) === "undefined") { return readMore(); }
        const content = msg[4];
        if (typeof(content) === 'string' && HK.getHash(content) === lastKnownHash) {
            // found it: remember where it starts and stop reading
            offset = msgObj.offset;
            return void abort();
        }
        readMore();
    }, function (err) {
        if (err) { return void cb(err); }
        cb(void 0, offset);
    });
};
|||
|
|||
// Archive an uploaded blob on behalf of its owner, then archive the
// ownership proof. Fails early with INSUFFICIENT_PERMISSIONS if the key
// does not own the blob; each stage is logged with its outcome.
const removeOwnedBlob = function (data, cb) {
    const blobId = data.blobId;
    const safeKey = data.safeKey;

    nThen(function (w) {
        // check if you have permissions
        blobStore.isOwnedBy(safeKey, blobId, w(function (err, owned) {
            if (!err && owned) { return; }
            w.abort();
            cb("INSUFFICIENT_PERMISSIONS");
        }));
    }).nThen(function (w) {
        // archive the blob itself
        blobStore.archive.blob(blobId, w(function (err) {
            Env.Log.info('ARCHIVAL_OWNED_FILE_BY_OWNER_RPC', {
                safeKey: safeKey,
                blobId: blobId,
                status: err ? String(err) : 'SUCCESS',
            });
            if (err) {
                w.abort();
                cb(err);
            }
        }));
    }).nThen(function () {
        // finally, archive the proof of ownership
        blobStore.archive.proof(safeKey, blobId, function (err) {
            Env.Log.info("ARCHIVAL_PROOF_REMOVAL_BY_OWNER_RPC", {
                safeKey: safeKey,
                blobId: blobId,
                status: err ? String(err) : 'SUCCESS',
            });
            if (err) { return void cb("E_PROOF_REMOVAL"); }
            cb(void 0, 'OK');
        });
    });
};
|||
|
|||
// Execute every expired scheduled task immediately.
const runTasks = function (data, cb) {
    return void Env.tasks.runAll(cb);
};
|||
|
|||
// Persist a task to be executed at the given time.
const writeTask = function (data, cb) {
    const { time, task_command, args } = data;
    Env.tasks.write(time, task_command, args, cb);
};
|||
|
|||
// dispatch table mapping command names (as sent by the parent process)
// to their handlers; every handler takes (data, cb)
const COMMANDS = {
    COMPUTE_INDEX: computeIndex,
    COMPUTE_METADATA: computeMetadata,
    GET_OLDER_HISTORY: getOlderHistory,
    GET_PIN_STATE: getPinState,
    GET_FILE_SIZE: getFileSize,
    GET_TOTAL_SIZE: getTotalSize,
    GET_DELETED_PADS: getDeletedPads,
    GET_MULTIPLE_FILE_SIZE: getMultipleFileSize,
    GET_HASH_OFFSET: getHashOffset,
    REMOVE_OWNED_BLOB: removeOwnedBlob,
    RUN_TASKS: runTasks,
    WRITE_TASK: writeTask,
};
|||
|
|||
// Validate an inline-signed message: base64-decode the signed message
// and the validation key, then open the signature with nacl.
// Calls back with an error string on failure, with nothing on success.
COMMANDS.INLINE = function (data, cb) {
    var signedMsg;
    var validateKey;
    try {
        signedMsg = Nacl.util.decodeBase64(data.msg);
    } catch (e) {
        return void cb('E_BAD_MESSAGE');
    }
    try {
        validateKey = Nacl.util.decodeBase64(data.key);
    } catch (e) {
        return void cb("E_BADKEY");
    }

    // validate the message
    if (!Nacl.sign.open(signedMsg, validateKey)) {
        return void cb("FAILED");
    }
    cb();
};
|||
|
|||
/* checkDetachedSignature
   Verify a detached ed25519 signature over a UTF8 message.
   Throws an Error whose message is a machine-readable code on any
   failure; returns nothing on success.

   FIX: the previous guard `if (!(signedMsg && publicKey)) { return false; }`
   returned instead of throwing. Its only caller (COMMANDS.DETACHED)
   ignores the return value and treats "no throw" as success, so a
   missing message or key silently PASSED validation. Missing arguments
   now throw like every other failure mode.
*/
const checkDetachedSignature = function (signedMsg, signature, publicKey) {
    if (!(signedMsg && publicKey)) { throw new Error("MISSING_ARGUMENTS"); }

    var signedBuffer;
    var pubBuffer;
    var signatureBuffer;

    try {
        signedBuffer = Nacl.util.decodeUTF8(signedMsg);
    } catch (e) {
        throw new Error("INVALID_SIGNED_BUFFER");
    }

    try {
        pubBuffer = Nacl.util.decodeBase64(publicKey);
    } catch (e) {
        throw new Error("INVALID_PUBLIC_KEY");
    }

    try {
        signatureBuffer = Nacl.util.decodeBase64(signature);
    } catch (e) {
        throw new Error("INVALID_SIGNATURE");
    }

    if (pubBuffer.length !== 32) {
        throw new Error("INVALID_PUBLIC_KEY_LENGTH");
    }

    if (signatureBuffer.length !== 64) {
        throw new Error("INVALID_SIGNATURE_LENGTH");
    }

    if (Nacl.sign.detached.verify(signedBuffer, signatureBuffer, pubBuffer) !== true) {
        throw new Error("FAILED");
    }
};
|||
|
|||
// Verify a detached signature. Any exception from the checker is
// surfaced to the caller as its message string; success calls back
// with no arguments.
COMMANDS.DETACHED = function (data, cb) {
    var failure;
    try {
        checkDetachedSignature(data.msg, data.sig, data.key);
    } catch (err) {
        failure = err && err.message;
    }
    if (failure) { return void cb(failure); }
    cb();
};
|||
|
|||
// Compute a stable hash of a list of channel ids: deduplicate, sort,
// JSON-serialize, and hash with nacl. Used to compare channel lists
// (e.g. pin lists) cheaply between client and server.
COMMANDS.HASH_CHANNEL_LIST = function (data, cb) {
    var channels = data.channels;
    if (!Array.isArray(channels)) { return void cb('INVALID_CHANNEL_LIST'); }

    // FIX: deduplicate with a Set; the previous indexOf scan was O(n^2).
    // Set preserves insertion order, and the list is sorted afterwards,
    // so the resulting hash is unchanged.
    var uniques = Array.from(new Set(channels));
    uniques.sort();

    var hash = Nacl.util.encodeBase64(Nacl.hash(Nacl
        .util.decodeUTF8(JSON.stringify(uniques))));

    cb(void 0, hash);
};
|||
|
|||
// Main IPC entry point: every message from the parent must carry a txid
// and the parent's pid, which are echoed back so the parent can match
// responses to requests.
process.on('message', function (data) {
    if (!data || !data.txid || !data.pid) {
        return void process.send({
            error:'E_INVAL',
            data: data,
        });
    }

    // reply helper: echoes txid/pid so the parent can route the response
    const cb = function (err, value) {
        process.send({
            error: err,
            txid: data.txid,
            pid: data.pid,
            value: value,
        });
    };

    if (!ready) {
        // the very first message only initializes the worker with its config;
        // NOTE(review): any command it carries is not executed — the parent's
        // init message carries only config, so this appears intentional
        return void init(data.config, function (err) {
            if (err) { return void cb(err); }
            ready = true;
            cb();
        });
    }

    const command = COMMANDS[data.command];
    if (typeof(command) !== 'function') {
        return void cb("E_BAD_COMMAND");
    }
    command(data, cb);
});
|||
|
|||
// A worker that hits an uncaught exception is in an unknown state:
// log loudly and exit non-zero so the parent pool forks a replacement.
process.on('uncaughtException', function (err) {
    console.error('[%s] UNCAUGHT EXCEPTION IN DB WORKER', new Date());
    console.error(err);
    console.error("TERMINATING");
    process.exit(1);
});
|||
@ -0,0 +1,367 @@ |
|||
/* jshint esversion: 6 */ |
|||
/* global process */ |
|||
const Util = require("../common-util"); |
|||
const nThen = require('nthen'); |
|||
const OS = require("os"); |
|||
const { fork } = require('child_process'); |
|||
const Workers = module.exports; |
|||
const PID = process.pid; |
|||
|
|||
const DB_PATH = 'lib/workers/db-worker'; |
|||
const MAX_JOBS = 16; |
|||
|
|||
/* Workers.initialize
   Fork a pool of database worker processes (one per CPU by default,
   bounded by config.maxWorkers), route commands to them round-robin,
   and expose their RPC commands as methods on Env. Calls back once the
   whole pool is ready.

   Fixes relative to the previous revision:
   * a maxWorkers value below 1 was logged and clamped to 1, but the
     clamped value was immediately overwritten by the raw value
   * the per-worker load check counted the keys of the worker's state
     object (always 2) rather than its pending tasks, so MAX_JOBS was
     never enforced
   * substituteWorker's guard `if (!task && task.msg)` could never skip
     an invalid entry (and would throw on a missing one)
   * substituteWorker registered the replacement worker's state a second
     time (initWorker already registers it on success), duplicating it
*/
Workers.initialize = function (Env, config, _cb) {
    var cb = Util.once(Util.mkAsync(_cb));

    const workers = [];

    // pending responses keyed by txid; logs when an expectation times out
    const response = Util.response(function (errLabel, info) {
        Env.Log.error('HK_DB_WORKER__' + errLabel, info);
    });

    const Log = Env.Log;
    // forward log lines emitted by worker processes into our own logger
    const handleLog = function (level, label, info) {
        if (typeof(Log[level]) !== 'function') { return; }
        Log[level](label, info);
    };

    var isWorker = function (value) {
        return value && value.worker && typeof(value.worker.send) === 'function';
    };

    // pick ids that aren't already in use...
    const guid = function () {
        var id = Util.uid();
        return response.expected(id)? guid(): id;
    };

    var workerOffset = -1;
    var queue = [];
    var getAvailableWorkerIndex = function () {
        // If there is already a backlog of tasks you can avoid some work
        // by going to the end of the line
        if (queue.length) { return -1; }

        var L = workers.length;
        if (L === 0) {
            Log.error('NO_WORKERS_AVAILABLE', {
                queue: queue.length,
            });
            return -1;
        }

        // cycle through the workers once
        // start from a different offset each time
        // return -1 if none are available

        workerOffset = (workerOffset + 1) % L;

        var temp;
        for (let i = 0; i < L; i++) {
            temp = (workerOffset + i) % L;
            /* I'd like for this condition to be more efficient
                (`Object.keys` is sub-optimal) but I found some bugs in my initial
                implementation stemming from a task counter variable going out-of-sync
                with reality when a worker crashed and its tasks were re-assigned to
                its substitute. I'm sure it can be done correctly and efficiently,
                but this is a relatively easy way to make sure it's always up to date.
                We'll see how it performs in practice before optimizing.
            */
            // FIX: count the worker's pending tasks, not the keys of its
            // state object (which is always 2)
            if (workers[temp] && Object.keys(workers[temp].tasks).length < MAX_JOBS) {
                return temp;
            }
        }
        return -1;
    };

    // Send a command to an available worker, or queue it until one frees up.
    var sendCommand = function (msg, _cb) {
        var index = getAvailableWorkerIndex();

        var state = workers[index];
        // if there is no worker available:
        if (!isWorker(state)) {
            // queue the message for when one becomes available
            queue.push({
                msg: msg,
                cb: _cb,
            });
            return;
        }

        var cb = Util.once(Util.mkAsync(_cb));

        const txid = guid();
        msg.txid = txid;
        msg.pid = PID;

        // track which worker is doing which jobs
        state.tasks[txid] = msg;

        // time out the response after sixty seconds
        response.expect(txid, cb, 60000);
        state.worker.send(msg);
    };

    var handleResponse = function (state, res) {
        if (!res) { return; }
        // handle log messages before checking if it was addressed to your PID
        // it might still be useful to know what happened inside an orphaned worker
        if (res.log) {
            return void handleLog(res.log, res.label, res.info);
        }
        // but don't bother handling things addressed to other processes
        // since it's basically guaranteed not to work
        if (res.pid !== PID) {
            return void Log.error("WRONG_PID", res);
        }

        if (!res.txid) { return; }
        response.handle(res.txid, [res.error, res.value]);
        delete state.tasks[res.txid];
        if (!queue.length) { return; }

        var nextMsg = queue.shift();
        /*  `nextMsg` was at the top of the queue.
            We know that a job just finished and all of this code
            is synchronous, so calling `sendCommand` should take the worker
            which was just freed up. This is somewhat fragile though, so
            be careful if you want to modify this block. The risk is that
            we take something that was at the top of the queue and push it
            to the back because the following msg took its place. OR, in an
            even worse scenario, we cycle through the queue but don't run anything.
        */
        sendCommand(nextMsg.msg, nextMsg.cb);
    };

    // Register a freshly-forked worker: send it its config, wire up its
    // message/exit handlers, and add it to the pool once it acknowledges.
    const initWorker = function (worker, cb) {
        const txid = guid();

        const state = {
            worker: worker,
            tasks: {},
        };

        response.expect(txid, function (err) {
            if (err) { return void cb(err); }
            workers.push(state);
            cb(void 0, state);
        }, 15000);

        worker.send({
            pid: PID,
            txid: txid,
            config: config,
        });

        worker.on('message', function (res) {
            handleResponse(state, res);
        });

        var substituteWorker = Util.once(function () {
            Env.Log.info("SUBSTITUTE_DB_WORKER", '');
            var idx = workers.indexOf(state);
            if (idx !== -1) {
                workers.splice(idx, 1);
            }

            // re-queue any jobs the dead worker had in flight
            Object.keys(state.tasks).forEach(function (txid) {
                const cb = response.expectation(txid);
                if (typeof(cb) !== 'function') { return; }
                const task = state.tasks[txid];
                // FIX: skip entries with no message to resend
                // (was `if (!task && task.msg)`, which never skipped and
                // could throw when the task was missing)
                if (!task || !task.msg) { return; }
                response.clear(txid);
                Log.info('DB_WORKER_RESEND', task.msg);
                sendCommand(task.msg, cb);
            });

            var w = fork(DB_PATH);
            initWorker(w, function (err) {
                if (err) {
                    throw new Error(err);
                }
                // FIX: initWorker already pushes the new state into
                // `workers` on success; pushing again duplicated it
            });
        });

        worker.on('exit', substituteWorker);
        worker.on('close', substituteWorker);
        worker.on('error', function (err) {
            substituteWorker();
            Env.Log.error("DB_WORKER_ERROR", {
                error: err,
            });
        });
    };

    nThen(function (w) {
        const max = config.maxWorkers;

        var limit;
        if (typeof(max) !== 'undefined') {
            // the admin provided a limit on the number of workers
            if (typeof(max) === 'number' && !isNaN(max)) {
                if (max < 1) {
                    // FIX: previously the clamped value was immediately
                    // overwritten by the raw value on the next line
                    Log.info("INSUFFICIENT_MAX_WORKERS", max);
                    limit = 1;
                } else {
                    limit = max;
                }
            } else {
                Log.error("INVALID_MAX_WORKERS", '[' + max + ']');
            }
        }

        var logged;

        // fork one worker per CPU, up to the configured limit
        OS.cpus().forEach(function (cpu, index) {
            if (limit && index >= limit) {
                if (!logged) {
                    logged = true;
                    Log.info('WORKER_LIMIT', "(Opting not to use available CPUs beyond " + index + ')');
                }
                return;
            }

            initWorker(fork(DB_PATH), w(function (err) {
                if (!err) { return; }
                w.abort();
                return void cb(err);
            }));
        });
    }).nThen(function () {
        // expose the worker commands as Env methods;
        // reads which depend on a consistent view of a channel take a
        // weak lock on it so writes are not interleaved

        Env.computeIndex = function (Env, channel, cb) {
            Env.store.getWeakLock(channel, function (next) {
                sendCommand({
                    channel: channel,
                    command: 'COMPUTE_INDEX',
                }, function (err, index) {
                    next();
                    cb(err, index);
                });
            });
        };

        Env.computeMetadata = function (channel, cb) {
            Env.store.getWeakLock(channel, function (next) {
                sendCommand({
                    channel: channel,
                    command: 'COMPUTE_METADATA',
                }, function (err, metadata) {
                    next();
                    cb(err, metadata);
                });
            });
        };

        Env.getOlderHistory = function (channel, oldestKnownHash, desiredMessages, desiredCheckpoint, cb) {
            Env.store.getWeakLock(channel, function (next) {
                sendCommand({
                    channel: channel,
                    command: "GET_OLDER_HISTORY",
                    hash: oldestKnownHash,
                    desiredMessages: desiredMessages,
                    desiredCheckpoint: desiredCheckpoint,
                }, Util.both(next, cb));
            });
        };

        Env.getPinState = function (safeKey, cb) {
            Env.pinStore.getWeakLock(safeKey, function (next) {
                sendCommand({
                    key: safeKey,
                    command: 'GET_PIN_STATE',
                }, Util.both(next, cb));
            });
        };

        Env.getFileSize = function (channel, cb) {
            sendCommand({
                command: 'GET_FILE_SIZE',
                channel: channel,
            }, cb);
        };

        Env.getDeletedPads = function (channels, cb) {
            sendCommand({
                command: "GET_DELETED_PADS",
                channels: channels,
            }, cb);
        };

        Env.getTotalSize = function (channels, cb) {
            // we could take out locks for all of these channels,
            // but it's OK if the size is slightly off
            sendCommand({
                command: 'GET_TOTAL_SIZE',
                channels: channels,
            }, cb);
        };

        Env.getMultipleFileSize = function (channels, cb) {
            sendCommand({
                command: "GET_MULTIPLE_FILE_SIZE",
                channels: channels,
            }, cb);
        };

        Env.getHashOffset = function (channel, hash, cb) {
            Env.store.getWeakLock(channel, function (next) {
                sendCommand({
                    command: 'GET_HASH_OFFSET',
                    channel: channel,
                    hash: hash,
                }, Util.both(next, cb));
            });
        };

        Env.removeOwnedBlob = function (blobId, safeKey, cb) {
            sendCommand({
                command: 'REMOVE_OWNED_BLOB',
                blobId: blobId,
                safeKey: safeKey,
            }, cb);
        };

        Env.runTasks = function (cb) {
            sendCommand({
                command: 'RUN_TASKS',
            }, cb);
        };

        Env.writeTask = function (time, command, args, cb) {
            sendCommand({
                command: 'WRITE_TASK',
                time: time,
                task_command: command,
                args: args,
            }, cb);
        };

        // Synchronous crypto functions, offloaded to the workers
        Env.validateMessage = function (signedMsg, key, cb) {
            sendCommand({
                msg: signedMsg,
                key: key,
                command: 'INLINE',
            }, cb);
        };

        Env.checkSignature = function (signedMsg, signature, publicKey, cb) {
            sendCommand({
                command: 'DETACHED',
                sig: signature,
                msg: signedMsg,
                key: publicKey,
            }, cb);
        };

        Env.hashChannelList = function (channels, cb) {
            sendCommand({
                command: 'HASH_CHANNEL_LIST',
                channels: channels,
            }, cb);
        };

        cb(void 0);
    });
};
|||
|
|||
|
|||
1766
rpc.js
File diff suppressed because it is too large
View File
File diff suppressed because it is too large
View File
@ -0,0 +1,42 @@ |
|||
/* jshint esversion: 6, node: true */ |
|||
const nThen = require("nthen"); |
|||
const Pins = require("../lib/pins"); |
|||
const Assert = require("assert"); |
|||
|
|||
const config = require("../lib/load-config"); |
|||
|
|||
// Run both pin-listing implementations (Pins.list and Pins.load) against
// the configured pin path, print their results, and assert that they
// produce exactly the same data.
var compare = function () {
    console.log(config);
    var conf = {
        pinPath: config.pinPath,
    };

    var list;
    var load;

    nThen(function (w) {
        Pins.list(w(function (err, pins) {
            if (err) { throw err; }
            list = pins;
            console.log(pins);
            console.log(list);
            console.log();
        }), conf);
    }).nThen(function (w) {
        Pins.load(w(function (err, pins) {
            if (err) { throw err; }
            load = pins;
            console.log(load);
            console.log();
        }), conf);
    }).nThen(function () {
        console.log({
            listLength: Object.keys(list).length,
            loadLength: Object.keys(load).length,
        });

        Assert.deepEqual(list, load);
        console.log("methods are equivalent");
    });
};

compare();
|||
@ -0,0 +1,235 @@ |
|||
/* globals process */ |
|||
|
|||
var Client = require("../../lib/client/"); |
|||
var Crypto = require("../../www/bower_components/chainpad-crypto"); |
|||
var Mailbox = Crypto.Mailbox; |
|||
var Nacl = require("tweetnacl/nacl-fast"); |
|||
var nThen = require("nthen"); |
|||
var Pinpad = require("../../www/common/pinpad"); |
|||
var Rpc = require("../../www/common/rpc"); |
|||
var Hash = require("../../www/common/common-hash"); |
|||
var CpNetflux = require("../../www/bower_components/chainpad-netflux"); |
|||
var Util = require("../../lib/common-util"); |
|||
|
|||
// you need more than 100 messages in the history, and you need a lastKnownHash between "50" and "length - 50"

// Open a chainpad-netflux session on the given mailbox channel, collecting
// every received message on config.user.messages. Calls back with the
// webchannel once history has synced, or with an error on channel failure.
var createMailbox = function (config, _cb) {
    var cb = Util.once(Util.mkAsync(_cb));

    var webchannel;
    var user = config.user;
    user.messages = [];

    CpNetflux.start({
        network: config.network,
        channel: config.channel,
        crypto: config.crypto,
        owners: [ config.edPublic ],

        // mailboxes don't use chainpad documents
        noChainPad: true,

        lastKnownHash: config.lastKnownHash,
        onChannelError: function (err) {
            cb(err);
        },
        onConnect: function (wc /*, sendMessage */) {
            webchannel = wc;
        },
        onMessage: function (msg /*, user, vKey, isCp, hash, author */) {
            user.messages.push(msg);
        },
        onReady: function () {
            cb(void 0, webchannel);
        },
    });
};
|||
|
|||
// log unhandled promise rejections rather than letting them go unnoticed
process.on('unhandledRejection', function (err) {
    console.error(err);
});
|||
|
|||
// shared scenario state, populated as the test steps below progress
var state = {};
|||
|
|||
// Generate a fresh curve25519 (box) keypair, base64-encoded for transport.
var makeCurveKeys = function () {
    var keys = Nacl.box.keyPair();
    return {
        curvePrivate: Nacl.util.encodeBase64(keys.secretKey),
        curvePublic: Nacl.util.encodeBase64(keys.publicKey),
    };
};
|||
|
|||
// Generate a fresh ed25519 (signing) keypair from a random seed,
// base64-encoded for transport.
var makeEdKeys = function () {
    var seed = Nacl.randomBytes(Nacl.sign.seedLength);
    var pair = Nacl.sign.keyPair.fromSeed(seed);
    var keys = {};
    keys.edPrivate = Nacl.util.encodeBase64(pair.secretKey);
    keys.edPublic = Nacl.util.encodeBase64(pair.publicKey);
    return keys;
};
|||
|
|||
var edKeys = makeEdKeys(); |
|||
var curveKeys = makeCurveKeys(); |
|||
var mailboxChannel = Hash.createChannelId(); |
|||
|
|||
// Create a connected test user: a netflux client, an anonymous RPC, an
// authenticated (pinpad) RPC, and a subscribed mailbox.
// Calls back with (err) on failure or (undefined, user) once ready.
// config.lastKnownHash (optional) truncates mailbox history replay.
var createUser = function (config, cb) {
    // config should contain keys for a team rpc (ed)
    // teamEdKeys
    // rosterHash

    var user;
    nThen(function (w) {
        // connect a fresh client to the server
        Client.create(w(function (err, client) {
            if (err) {
                w.abort();
                return void cb(err);
            }
            user = client;
            // destroy is a one-shot event; teardown callbacks register on it
            user.destroy = Util.mkEvent(true);
            user.destroy.reg(function () {
                user.network.disconnect();
            });
        }));
    }).nThen(function (w) {
        // make all the parameters you'll need

        var network = user.network = user.config.network;
        // every user created by this script shares the same keys and
        // mailbox channel (module-level constants)
        user.edKeys = edKeys;
        user.curveKeys = curveKeys;

        user.mailbox = Mailbox.createEncryptor(user.curveKeys);
        user.mailboxChannel = mailboxChannel;

        // create an anon rpc for alice
        Rpc.createAnonymous(network, w(function (err, rpc) {
            if (err) {
                w.abort();
                // NOTE(review): user.shutdown is not defined anywhere in this
                // file -- presumably supplied by Client.create; confirm,
                // otherwise this error path throws. Also note that cb is
                // never invoked on this path.
                user.shutdown();
                return void console.error('ANON_RPC_CONNECT_ERR');
            }
            user.anonRpc = rpc;
            user.destroy.reg(function () {
                user.anonRpc.destroy();
            });
        }));

        // authenticated rpc using the ed keys
        Pinpad.create(network, user.edKeys, w(function (err, rpc) {
            if (err) {
                w.abort();
                // NOTE(review): same concerns as the anonymous-rpc error
                // path above (user.shutdown, cb never called)
                user.shutdown();
                console.error(err);
                return console.log('RPC_CONNECT_ERR');
            }
            user.rpc = rpc;
            user.destroy.reg(function () {
                user.rpc.destroy();
            });
        }));
    }).nThen(function (w) {
        // create and subscribe to your mailbox
        createMailbox({
            user: user,

            // forwarded so a reconnecting user can resume mid-history
            lastKnownHash: config.lastKnownHash,

            network: user.network,
            channel: user.mailboxChannel,
            crypto: user.mailbox,
            edPublic: user.edKeys.edPublic,
        }, w(function (err /*, wc*/) {
            if (err) {
                w.abort();
                //console.error("Mailbox creation error");
                cb(err);
                //process.exit(1);
            }
            //wc.leave();
        }));
    }).nThen(function () {
        user.cleanup = function (cb) {
            //console.log("Destroying user");
            // TODO remove your mailbox
            user.destroy.fire();
            // self-assignment: silences the linter's unused-param warning
            cb = cb;
        };

        cb(void 0, user);
    });
};
|||
|
|||
var alice;

nThen(function (w) {
    // create the first user, who will write the mailbox history
    createUser({
        //sharedConfig
    }, w(function (err, _alice) {
        if (err) {
            w.abort();
            return void console.log(err);
        }
        alice = _alice;
        alice.name = 'alice';
    }));
    /*
    createUser(sharedConfig, w(function (err, _bob) {
        if (err) {
            w.abort();
            return void console.log(err);
        }
        bob = _bob;
        bob.name = 'bob';
    }));*/
}).nThen(function (w) {
    // write 160 messages to alice's mailbox, recording each message's
    // hash (the first 64 chars of the ciphertext) so a later connection
    // can resume from the middle of the history
    var i = 0;
    var next = w();

    state.hashes = [];

    var send = function () {
        if (i++ >= 160) { return next(); }

        var msg = alice.mailbox.encrypt(JSON.stringify({
            pewpew: 'bangbang',
        }), alice.curveKeys.curvePublic);

        var hash = msg.slice(0, 64);
        state.hashes.push(hash);

        alice.anonRpc.send('WRITE_PRIVATE_MESSAGE', [
            alice.mailboxChannel,
            msg
            //Nacl.util.encodeBase64(Nacl.randomBytes(128))
        ], w(function (err) {
            if (err) { throw new Error(err); }
            console.log('message %s written successfully', i);
            // small delay between writes so the server keeps up
            setTimeout(send, 15);
        }));
    };
    send();
}).nThen(function (w) {
    // reconnect to the same mailbox from hash #55; the test expects
    // exactly 105 of the 160 messages to be replayed
    console.log("Connecting with second user");
    createUser({
        lastKnownHash: state.hashes[55],
    }, w(function (err, _alice) {
        if (err) {
            w.abort();
            console.log("lastKnownHash: ", state.hashes[55]);
            console.log(err);
            process.exit(1);
            //return void console.log(err);
        }
        var user = state.alice2 = _alice;

        if (user.messages.length === 105) {
            // success: truncated history had the expected length
            process.exit(0);
        }
        //console.log(user.messages, user.messages.length);
        process.exit(1);
    }));
}).nThen(function () {


}).nThen(function () {
    // unreachable in practice: the previous block always calls process.exit
    alice.cleanup();
    //bob.cleanup();
});
|||
|
|||
@ -0,0 +1,46 @@ |
|||
/*jshint esversion: 6 */ |
|||
const Pins = require("../../lib/pins"); |
|||
|
|||
var stats = { |
|||
users: 0, |
|||
lines: 0, // how many lines did you iterate over
|
|||
surplus: 0, // how many of those lines were not needed?
|
|||
pinned: 0, // how many files are pinned?
|
|||
duplicated: 0, |
|||
}; |
|||
|
|||
// Fold one user's pin-log summary into the module-level `stats` object.
// ref: parsed per-user state (surplus, pins, index); id: the user's safe
// key; pinned: map of channel ids already pinned by previously-seen users.
var handler = function (ref, id /* safeKey */, pinned) {
    if (ref.surplus) {
        // lines in the log that could be trimmed away
        stats.surplus += ref.surplus;
    }

    Object.keys(ref.pins).forEach(function (item) {
        if (pinned.hasOwnProperty(item)) {
            // somebody else already pinned this channel
            stats.duplicated++;
        } else {
            // first time we've seen this channel pinned
            stats.pinned++;
        }
    });

    stats.users++;
    stats.lines += ref.index;
};
|||
|
|||
// Walk every pin log under the configured pinPath, feeding each user's
// parsed state to `handler`, then print the aggregated statistics.
Pins.list(function (err) {
    if (err) { return void console.error(err); }
    /*
    for (var id in pinned) {
        console.log(id);
        stats.pinned++;
    }
    */
    console.log(stats);
}, {
    pinPath: require("../../lib/load-config").pinPath,
    handler: handler,
});
|||
|
|||
@ -0,0 +1,41 @@ |
|||
/*jshint esversion: 6 */ |
|||
const Plan = require("../../lib/plan"); |
|||
|
|||
// Invoke f after a random delay between 250 and 1749 milliseconds.
var rand_delay = function (f) {
    var ms = 250 + Math.floor(Math.random() * 1500);
    setTimeout(f, ms);
};
|||
|
|||
// Exercise the Plan scheduler: a priority-1 job that dynamically enqueues
// twelve priority-0 jobs, then a priority-2 job that enqueues four
// priority-3 jobs. Plan(6) caps concurrent jobs at six.
var plan = Plan(6).job(1, function (next) {
    // each dynamically-queued job completes after a random delay;
    // the inner `next` intentionally shadows the outer one
    [1,2,3,4,5,6,7,8,9,10,11,12].forEach(function (n) {
        plan.job(0, function (next) {
            rand_delay(function () {
                console.log("finishing job %s", n);
                next();
            });
        });
    });
    console.log("finishing job 0");
    next();
}).job(2, function (next) {
    console.log("finishing job 13");

    [
        100,
        200,
        300,
        400
    ].forEach(function (n) {
        plan.job(3, function (next) {
            rand_delay(function () {
                console.log("finishing job %s", n);
                next();
            });
        });
    });

    next();
}).done(function () { console.log("DONE"); }).start();
|||
|
|||
//console.log(plan);
|
|||
|
|||
//plan.start();
|
|||
@ -0,0 +1,220 @@ |
|||
/* three types of actions: |
|||
* read |
|||
* write |
|||
* append |
|||
each of which take a random amount of time |
|||
|
|||
*/ |
|||
var Util = require("../../lib/common-util"); |
|||
var schedule = require("../../lib/schedule")(); |
|||
var nThen = require("nthen"); |
|||
|
|||
// Return a uniformly random integer in [0, n).
var rand = function (n) {
    var scaled = Math.random() * n;
    return Math.floor(scaled);
};
|||
|
|||
// Produce a random duration (ms) for a simulated action.
var rand_time = function () {
    // between 25 and 324 inclusive
    // (the previous comment claimed "between 51 and 151", which did not
    // match rand(300) + 25)
    return rand(300) + 25;
};
|||
|
|||
// Build a factory of actions of a given type. Each call to the returned
// function reserves the next sequential id and yields a task which logs,
// waits `time` ms, then invokes its continuation. The task returns its id
// synchronously so callers can assert on execution order.
var makeAction = function (type) {
    var counter = 0;
    return function (time) {
        var id = counter;
        counter += 1;
        return function (next) {
            console.log(" Beginning action: %s#%s", type, id);
            setTimeout(function () {
                console.log(" Completed action: %s#%s", type, id);
                next();
            }, time);
            return id;
        };
    };
};
|||
|
|||
// The set of supported action types.
var TYPES = ['WRITE', 'READ', 'APPEND'];

// Pick an action type with a weighted distribution:
// 50% APPEND, 40% READ, 10% WRITE.
var chooseAction = function () {
    var n = rand(100);
    if (n >= 90) { return 'WRITE'; }
    return (n < 50) ? 'APPEND' : 'READ';
};
|||
|
|||
// Run one scripted scenario against the scheduler and assert its
// ordering guarantees:
//   APPEND -> schedule.ordered   (strictly sequential, FIFO per id)
//   WRITE  -> schedule.blocking  (exclusive: nothing else may run)
//   READ   -> schedule.unordered (may run in parallel)
// script: array of [type, time] pairs; cb: called once every scene has
// run and all assertions have held. Throws on any violation.
var test = function (script, cb) {
    var uid = Util.uid();

    var TO_RUN = script.length;
    var total_run = 0;

    var parallel = 0;
    var last_run_ordered = -1;
    //var i = 0;

    var ACTIONS = {};
    TYPES.forEach(function (type) {
        ACTIONS[type] = makeAction(type);
    });

    nThen(function (w) {
        // hold this block open long enough for every scene to be queued
        setTimeout(w(), 3000);
        // run scripted actions with assertions
        script.forEach(function (scene) {
            var type = scene[0];
            var time = typeof(scene[1]) === 'number'? scene[1]: rand_time();

            var action = ACTIONS[type](time);
            console.log("Queuing action of type: %s(%s)", type, time);

            var proceed = w();

            switch (type) {
                case 'APPEND':
                    return schedule.ordered(uid, w(function (next) {
                        parallel++;
                        var temp = action(function () {
                            parallel--;
                            total_run++;
                            proceed();
                            next();
                        });
                        // ordered tasks must begin in submission order
                        if (temp !== (last_run_ordered + 1)) {
                            throw new Error("out of order");
                        }
                        last_run_ordered = temp;
                    }));
                case 'WRITE':
                    return schedule.blocking(uid, w(function (next) {
                        parallel++;
                        action(function () {
                            parallel--;
                            total_run++;
                            proceed();
                            next();
                        });
                        // a blocking task must run with nothing else active
                        if (parallel > 1) {
                            console.log("parallelism === %s", parallel);
                            throw new Error("too much parallel");
                        }
                    }));
                case 'READ':
                    return schedule.unordered(uid, w(function (next) {
                        parallel++;
                        action(function () {
                            parallel--;
                            total_run++;
                            proceed();
                            next();
                        });
                    }));
                default:
                    throw new Error("wut");
            }
        });
    }).nThen(function () {
        // make assertions about the whole script
        if (total_run !== TO_RUN) {
            console.log("Ran %s / %s", total_run, TO_RUN);
            throw new Error("skipped tasks");
        }
        console.log("total_run === %s", total_run);

        cb();
    });
};
|||
|
|||
|
|||
// Generate a random script of 10 to 24 scenes, each a [type, time] pair.
var randomScript = function () {
    var count = rand(15) + 10;
    var script = [];
    for (var i = 0; i < count; i++) {
        script.push([ chooseAction(), rand_time() ]);
    }
    return script;
};
|||
|
|||
// Scene constructors: pair an action type with a duration.
var makeScene = function (type) {
    return function (t) {
        return [type, t];
    };
};
var WRITE = makeScene('WRITE');
var READ = makeScene('READ');
var APPEND = makeScene('APPEND');
|||
|
|||
// Driver: two fixed scripts with known timings, then 50 randomized
// scripts chained back to back.
nThen(function (w) {
    test([
        ['READ', 150],
        ['APPEND', 200],
        ['APPEND', 100],
        ['READ', 350],
        ['WRITE', 400],
        ['APPEND', 275],
        ['APPEND', 187],
        ['WRITE', 330],
        ['WRITE', 264],
        ['WRITE', 256],
    ], w(function () {
        console.log("finished pre-scripted test\n");
    }));
}).nThen(function (w) {
    test([
        WRITE(289),
        APPEND(281),
        READ(207),
        WRITE(225),
        READ(279),
        WRITE(300),
        READ(331),
        APPEND(341),
        APPEND(385),
        READ(313),
        WRITE(285),
        READ(304),
        APPEND(273),
        APPEND(150),
        WRITE(246),
        READ(244),
        WRITE(172),
        APPEND(253),
        READ(215),
        READ(296),
        APPEND(281),
        APPEND(296),
        WRITE(168),
    ], w(function () {
        console.log("finished 2nd pre-scripted test\n");
    }));
}).nThen(function () {
    var totalTests = 50;
    var randomTests = 1;

    var last = nThen(function () {
        console.log("beginning randomized tests");
    });

    // chain each random test so it runs after the previous one completes
    var queueRandomTest = function (i) {
        last = last.nThen(function (w) {
            console.log("running random test script #%s\n", i);
            test(randomScript(), w(function () {
                console.log("finished random test #%s\n", i);
            }));
        });
    };

    while (randomTests <=totalTests) { queueRandomTest(randomTests++); }

    last.nThen(function () {
        console.log("finished %s random tests", totalTests);
    });
});
|||
|
|||
|
|||
@ -1,59 +0,0 @@ |
|||
# Storage Mechanisms |
|||
|
|||
Cryptpad's message API is quite simple and modular, and it isn't especially difficult to write alternative modules that employ your favourite datastore. |
|||
|
|||
There are a few guidelines for creating a module: |
|||
|
|||
Dependencies for your storage engine **should not** be added to Cryptpad. |
|||
Instead, write an adaptor, and place it in `cryptpad/storage/yourAdaptor.js`. |
|||
|
|||
Alternatively, storage adaptors can be published to npm, and required from your config (once installed). |
|||
|
|||
## Your adaptor should conform to a simple API. |
|||
|
|||
It must export an object with a single property, `create`, which is a function. |
|||
That function must accept two arguments: |
|||
|
|||
1. an object containing configuration values |
|||
- any configuration values that you require should be well documented |
|||
- they should also be named carefully so as to avoid collisions with other modules |
|||
2. a callback |
|||
- this callback is used to return an object with (currently) two methods |
|||
- even if your storage mechanism can be executed synchronously, we use the callback pattern for portability. |
|||
|
|||
## Methods |
|||
|
|||
### message(channelName, content, handler) |
|||
|
|||
When Cryptpad receives a message, it saves it into its datastore using its equivalent of a table for its channel name, and then relays the message to every other client which is participating in the same channel. |
|||
|
|||
Relaying logic exists outside of the storage module, you simply need to store the message then execute the handler on success. |
|||
|
|||
### getMessages(channelName, handler, callback) |
|||
|
|||
When a new client joins, they request the entire history of messages for a particular channel. |
|||
This method retrieves those messages, and delivers them in order.
|||
|
|||
In practice, out of order messages make your clientside application more likely to fail, however, they are generally tolerated. |
|||
As a channel accumulates a greater number of messages, the likelihood of the application receiving them in the wrong order becomes greater. |
|||
This results in older sessions becoming less reliable. |
|||
|
|||
This function accepts the name of the channel in which the user is interested, the handler for each message, and the callback to be executed when the last message has been fetched and handled. |
|||
|
|||
**Note**, the callback is a new addition to this API. |
|||
It is only implemented within the leveldb adaptor, making our latest code incompatible with the other back ends. |
|||
While we migrate to our new Netflux API, only the leveldb adaptor will be supported. |
|||
|
|||
### removeChannel(channelName, callback)
|||
|
|||
This method is called (optionally, see config.example.js for more info) some amount of time after the last client in a channel disconnects. |
|||
|
|||
It should remove any history of that channel, and execute a callback which takes an error message as an argument. |
|||
|
|||
## Documenting your adaptor |
|||
|
|||
Naturally, you should comment your code well before making a PR. |
|||
Failing that, you should definitely add notes to `cryptpad/config.example.js` such that people who wish to install your adaptor know how to do so. |
|||
|
|||
Notes on how to install the back end, as well as how to install the client for connecting to the back end (as is the case with many datastores), as well as how to configure cryptpad to use your adaptor. |
|||
The current configuration file should serve as an example of what to add, and how to comment. |
|||
@ -1,628 +0,0 @@ |
|||
/* globals Buffer */ |
|||
var Fs = require("fs"); |
|||
var Fse = require("fs-extra"); |
|||
var Path = require("path"); |
|||
|
|||
var BlobStore = module.exports; |
|||
var nThen = require("nthen"); |
|||
var Semaphore = require("saferphore"); |
|||
var Util = require("../lib/common-util"); |
|||
|
|||
// A valid safe key is a 44-character string containing no '/' characters.
var isValidSafeKey = function (safeKey) {
    if (typeof(safeKey) !== 'string') { return false; }
    if (safeKey.length !== 44) { return false; }
    return safeKey.indexOf('/') === -1;
};
|||
|
|||
// A valid blob id is a 48-character lowercase hexadecimal string.
var isValidId = function (id) {
    return typeof(id) === 'string' &&
        id.length === 48 &&
        /^[a-f0-9]{48}$/.test(id);
};
|||
|
|||
// helpers
|
|||
|
|||
// Map a live storage path to its location under the archive root.
var prependArchive = function (Env, path) {
    var root = Env.archivePath;
    return Path.join(root, path);
};
|||
|
|||
// /blob/<safeKeyPrefix>/<safeKey>/<blobPrefix>/<blobId>
|
|||
// Blobs are sharded by the first two characters of their id:
// <blobPath>/<blobId[0:2]>/<blobId>
var makeBlobPath = function (Env, blobId) {
    var prefix = blobId.slice(0, 2);
    return Path.join(Env.blobPath, prefix, blobId);
};
|||
|
|||
// /blobstate/<safeKeyPrefix>/<safeKey>
|
|||
// Staged (in-progress) uploads are sharded by the first two characters
// of the uploader's safe key: <blobStagingPath>/<safeKey[0:2]>/<safeKey>
var makeStagePath = function (Env, safeKey) {
    var prefix = safeKey.slice(0, 2);
    return Path.join(Env.blobStagingPath, prefix, safeKey);
};
|||
|
|||
// /blob/<safeKeyPrefix>/<safeKey>/<blobPrefix>/<blobId>
|
|||
// Ownership proofs live under the owner's safe key, sharded twice:
// <blobPath>/<safeKey[0:3]>/<safeKey>/<blobId[0:2]>/<blobId>
var makeProofPath = function (Env, safeKey, blobId) {
    var keyPrefix = safeKey.slice(0, 3);
    var blobPrefix = blobId.slice(0, 2);
    return Path.join(Env.blobPath, keyPrefix, safeKey, blobPrefix, blobId);
};
|||
|
|||
// Invert makeProofPath: recover the blob id (last segment) and the
// owner's safe key (third segment from the end) from a proof path.
var parseProofPath = function (path) {
    var parts = path.split('/');
    var len = parts.length;
    return {
        blobId: parts[len - 1],
        safeKey: parts[len - 3],
    };
};
|||
|
|||
// getUploadSize: used by
|
|||
// getFileSize
|
|||
// Report the size in bytes of an uploaded blob.
// Calls back with (err) on failure or (undefined, size); a missing file
// is reported as size 0 (deleted blobs count as empty).
var getUploadSize = function (Env, blobId, cb) {
    var path = makeBlobPath(Env, blobId);
    // NOTE(review): Path.join returns a truthy string, so this guard
    // appears to be dead code -- confirm makeBlobPath cannot return a
    // falsy value before relying on INVALID_UPLOAD_ID being reachable
    if (!path) { return cb('INVALID_UPLOAD_ID'); }
    Fs.stat(path, function (err, stats) {
        if (err) {
            // if a file was deleted, its size is 0 bytes
            if (err.code === 'ENOENT') { return cb(void 0, 0); }
            return void cb(err.code);
        }
        cb(void 0, stats.size);
    });
};
|||
|
|||
// isFile: used by
|
|||
// removeOwnedBlob
|
|||
// uploadComplete
|
|||
// uploadStatus
|
|||
// Check whether filePath names an existing regular file.
// Calls back with (err) on unexpected stat failures, otherwise
// (undefined, boolean); a missing entry simply means "not a file".
var isFile = function (filePath, cb) {
    Fs.stat(filePath, function (e, stats) {
        if (!e) { return void cb(void 0, stats.isFile()); }
        if (e.code === 'ENOENT') { return void cb(void 0, false); }
        cb(e.message);
    });
};
|||
|
|||
// Open an append-mode binary write stream at `full`, creating parent
// directories as needed. Calls back exactly once with (err) or with
// (undefined, stream) only after the stream has actually opened.
var makeFileStream = function (full, _cb) {
    var cb = Util.once(Util.mkAsync(_cb));
    Fse.mkdirp(Path.dirname(full), function (e) {
        if (e || !full) { // !full for pleasing flow, it's already checked
            return void cb(e ? e.message : 'INTERNAL_ERROR');
        }

        try {
            var stream = Fs.createWriteStream(full, {
                flags: 'a',
                encoding: 'binary',
                // 64KiB write buffer
                highWaterMark: Math.pow(2, 16),
            });
            stream.on('open', function () {
                cb(void 0, stream);
            });
            stream.on('error', function (err) {
                cb(err);
            });
        } catch (err) {
            cb('BAD_STREAM');
        }
    });
};
|||
|
|||
/********** METHODS **************/ |
|||
|
|||
// Append one base64-encoded chunk of an in-progress upload to the
// uploader's staging file. The session must have been initialized with
// the declared (pending) upload size before the first chunk arrives.
// Calls back with (err) or (undefined, bytesWritten).
var upload = function (Env, safeKey, content, cb) {
    var dec;

    try { dec = Buffer.from(content, 'base64'); }
    catch (e) { return void cb('DECODE_BUFFER'); }
    var len = dec.length;

    var session = Env.getSession(safeKey);

    if (typeof(session.currentUploadSize) !== 'number' ||
        typeof(session.pendingUploadSize) !== 'number') {
        // improperly initialized... maybe they didn't check before uploading?
        // reject it, just in case
        return cb('NOT_READY');
    }

    // NOTE(review): this compares the running total *before* appending the
    // current chunk, so the final chunk can push the upload past the
    // declared size -- confirm whether `currentUploadSize + len` was meant
    if (session.currentUploadSize > session.pendingUploadSize) {
        return cb('E_OVER_LIMIT');
    }

    var stagePath = makeStagePath(Env, safeKey);

    if (!session.blobstage) {
        // first chunk: lazily open the staging write stream
        makeFileStream(stagePath, function (e, stream) {
            if (!stream) { return void cb(e); }

            var blobstage = session.blobstage = stream;
            blobstage.write(dec);
            session.currentUploadSize += len;
            cb(void 0, dec.length);
        });
    } else {
        session.blobstage.write(dec);
        session.currentUploadSize += len;
        cb(void 0, dec.length);
    }
};
|||
|
|||
// upload_cancel
|
|||
// upload_cancel
// Abort an in-progress upload: reset the session's byte counters to
// expect a fresh upload of `fileSize` bytes, close any open staging
// stream, and delete the staging file. Calls back with (err) or
// (undefined).
var upload_cancel = function (Env, safeKey, fileSize, cb) {
    var session = Env.getSession(safeKey);
    session.pendingUploadSize = fileSize;
    session.currentUploadSize = 0;
    if (session.blobstage) {
        session.blobstage.close();
        delete session.blobstage;
    }

    var path = makeStagePath(Env, safeKey);

    Fs.unlink(path, function (e) {
        if (e) { return void cb('E_UNLINK'); }
        cb(void 0);
    });
};
|||
|
|||
// upload_complete
|
|||
// upload_complete
// Finalize an upload: close the staging stream, then move the staged
// file to its final location under the blob root.
// Calls back with (err) or (undefined, id).
//
// FIX: the original nThen blocks did not wrap their asynchronous
// callbacks with w(), so the mkdirp / isFile / move steps raced each
// other, and cb could fire twice (once prematurely with
// (undefined, newPath, id) before the move, then again after it).
// Each step now completes before the next begins and cb fires once.
var upload_complete = function (Env, safeKey, id, cb) {
    var session = Env.getSession(safeKey);

    if (session.blobstage && session.blobstage.close) {
        session.blobstage.close();
        delete session.blobstage;
    }

    var oldPath = makeStagePath(Env, safeKey);
    var newPath = makeBlobPath(Env, id);

    nThen(function (w) {
        // make sure the path to your final location exists
        Fse.mkdirp(Path.dirname(newPath), w(function (e) {
            if (e) {
                w.abort();
                return void cb('RENAME_ERR');
            }
        }));
    }).nThen(function (w) {
        // make sure there's not already something in that exact location
        isFile(newPath, w(function (e, yes) {
            if (e) {
                w.abort();
                return void cb(e);
            }
            if (yes) {
                w.abort();
                return void cb('RENAME_ERR');
            }
        }));
    }).nThen(function () {
        // finally, move the old file to the new path
        // FIXME we could just move and handle the EEXISTS instead of the above block
        Fse.move(oldPath, newPath, function (e) {
            if (e) { return void cb('RENAME_ERR'); }
            cb(void 0, id);
        });
    });
};
|||
|
|||
// Probe whether a candidate blob id's final path is free.
// Calls back with no argument when the path is unused, 'EEXISTS' when a
// file is already there (caller should pick a new id and recurse), or
// the error code of any unexpected failure.
var tryId = function (path, cb) {
    Fs.access(path, Fs.constants.R_OK | Fs.constants.W_OK, function (e) {
        if (!e) { return void cb('EEXISTS'); }
        if (e.code === 'ENOENT') { return void cb(); }
        cb(e.code);
    });
};
|||
|
|||
// owned_upload_complete
|
|||
// owned_upload_complete
// Finalize an *owned* upload: move the staged file into the blob root
// AND create an empty "proof" file under the uploader's safeKey tree so
// ownership can later be verified (see makeProofPath).
// Calls back with (err) or (undefined, id).
var owned_upload_complete = function (Env, safeKey, id, cb) {
    var session = Env.getSession(safeKey);

    // the file has already been uploaded to the staging area
    // close the pending writestream
    if (session.blobstage && session.blobstage.close) {
        session.blobstage.close();
        delete session.blobstage;
    }

    if (!isValidId(id)) {
        //WARN('ownedUploadComplete', "id is invalid");
        return void cb('EINVAL_ID');
    }

    var oldPath = makeStagePath(Env, safeKey);
    if (typeof(oldPath) !== 'string') {
        return void cb('EINVAL_CONFIG');
    }

    var finalPath = makeBlobPath(Env, id);

    var finalOwnPath = makeProofPath(Env, safeKey, id);

    // the user wants to move it into blob and create a empty file with the same id
    // in their own space:
    // /blob/safeKeyPrefix/safeKey/blobPrefix/blobID

    nThen(function (w) {
        // make the requisite directory structure using Mkdirp
        Fse.mkdirp(Path.dirname(finalPath), w(function (e /*, path */) {
            if (e) { // does not throw error if the directory already existed
                w.abort();
                return void cb(e.code);
            }
        }));
        Fse.mkdirp(Path.dirname(finalOwnPath), w(function (e /*, path */) {
            if (e) { // does not throw error if the directory already existed
                w.abort();
                return void cb(e.code);
            }
        }));
    }).nThen(function (w) {
        // make sure the id does not collide with another
        tryId(finalPath, w(function (e) {
            if (e) {
                w.abort();
                return void cb(e);
            }
        }));
    }).nThen(function (w) {
        // Create the empty file proving ownership
        Fs.writeFile(finalOwnPath, '', w(function (e) {
            if (e) {
                w.abort();
                return void cb(e.code);
            }
            // otherwise it worked...
        }));
    }).nThen(function (w) {
        // move the existing file to its new path
        Fse.move(oldPath, finalPath, w(function (e) {
            if (e) {
                // if there's an error putting the file into its final location...
                // ... you should remove the ownership file
                Fs.unlink(finalOwnPath, function () {
                    // but if you can't, it's not catastrophic
                    // we can clean it up later
                });
                w.abort();
                return void cb(e.code);
            }
            // otherwise it worked...
        }));
    }).nThen(function () {
        // clean up their session when you're done
        // call back with the blob id...
        cb(void 0, id);
    });
};
|||
|
|||
// removeBlob
// Permanently delete a blob from the blob root.
var remove = function (Env, blobId, cb) {
    var blobPath = makeBlobPath(Env, blobId);
    Fs.unlink(blobPath, cb); // TODO COLDSTORAGE
};

// removeProof
// Permanently delete the empty file proving a user's ownership of a blob.
var removeProof = function (Env, safeKey, blobId, cb) {
    var proofPath = makeProofPath(Env, safeKey, blobId);
    Fs.unlink(proofPath, cb);
};

// isOwnedBy(id, safeKey)
// Check whether a proof file exists for (safeKey, blobId).
// Calls back with (err) or (undefined, isOwner: boolean).
var isOwnedBy = function (Env, safeKey, blobId, cb) {
    var proofPath = makeProofPath(Env, safeKey, blobId);
    isFile(proofPath, cb);
};
|||
|
|||
|
|||
// archiveBlob
// Move a blob into the archive tree, replacing any previous archive copy.
var archiveBlob = function (Env, blobId, cb) {
    var blobPath = makeBlobPath(Env, blobId);
    var archivePath = prependArchive(Env, blobPath);
    Fse.move(blobPath, archivePath, { overwrite: true }, cb);
};

// Permanently delete an archived blob.
var removeArchivedBlob = function (Env, blobId, cb) {
    var archivePath = prependArchive(Env, makeBlobPath(Env, blobId));
    Fs.unlink(archivePath, cb);
};

// restoreBlob
// Move an archived blob back to its live location.
// NOTE(review): unlike archiveBlob, no { overwrite: true } is passed, so
// restoring over an existing live blob will fail -- confirm intended.
var restoreBlob = function (Env, blobId, cb) {
    var blobPath = makeBlobPath(Env, blobId);
    var archivePath = prependArchive(Env, blobPath);
    Fse.move(archivePath, blobPath, cb);
};

// archiveProof
// Move an ownership proof into the archive tree, replacing any previous copy.
var archiveProof = function (Env, safeKey, blobId, cb) {
    var proofPath = makeProofPath(Env, safeKey, blobId);
    var archivePath = prependArchive(Env, proofPath);
    Fse.move(proofPath, archivePath, { overwrite: true }, cb);
};

// Permanently delete an archived ownership proof.
var removeArchivedProof = function (Env, safeKey, blobId, cb) {
    var archivedPath = prependArchive(Env, makeProofPath(Env, safeKey, blobId));
    Fs.unlink(archivedPath, cb);
};

// restoreProof
// Move an archived ownership proof back to its live location.
var restoreProof = function (Env, safeKey, blobId, cb) {
    var proofPath = makeProofPath(Env, safeKey, blobId);
    var archivePath = prependArchive(Env, proofPath);
    Fse.move(archivePath, proofPath, cb);
};
|||
|
|||
// Build a concurrent filesystem walker. Returns a recurse(path) function;
// every non-directory encountered is passed to
// handleChild(err, path, next), and `done` fires once the synchronous
// call(s) to recurse and all queued work have drained.
// n bounds how many filesystem jobs run at once (minimum 2).
var makeWalker = function (n, handleChild, done) {
    if (!n || typeof(n) !== 'number' || n < 2) { n = 2; }

    var W;
    nThen(function (w) {
        // this asynchronous bit defers the completion of this block until
        // synchronous execution has completed. This means you must create
        // the walker and start using it synchronously or else it will call back
        // prematurely
        setTimeout(w());
        W = w;
    }).nThen(function () {
        done();
    });

    // do no more than n jobs at a time
    // (the previous comment said "20", but the limit is the n argument)
    var tasks = Semaphore.create(n);

    var recurse = function (path) {
        tasks.take(function (give) {
            // releases the semaphore slot AND resolves one waiter on W
            var next = give(W());

            nThen(function (w) {
                // check if the path is a directory...
                Fs.stat(path, w(function (err, stats) {
                    if (err) { return next(); }
                    if (!stats.isDirectory()) {
                        w.abort();
                        return void handleChild(void 0, path, next);
                    }
                    // fall through
                }));
            }).nThen(function () {
                // handle directories
                Fs.readdir(path, function (err, dir) {
                    if (err) { return next(); }
                    // everything is fine and it's a directory...
                    dir.forEach(function (d) {
                        recurse(Path.join(path, d));
                    });
                    next();
                });
            });
        });
    };

    return recurse;
};
|||
|
|||
// Iterate over every ownership proof under `root`, calling
// handler(err, { path, blobId, safeKey, atime, ctime, mtime }, next) for
// each one, then cb() (or cb(err)) once the whole tree has been walked.
var listProofs = function (root, handler, cb) {
    Fs.readdir(root, function (err, dir) {
        if (err) { return void cb(err); }

        var walk = makeWalker(20, function (err, path, next) {
            // path is the path to a child node on the filesystem

            // next handles the next job in a queue

            // iterate over proofs
            // check for presence of corresponding files
            Fs.stat(path, function (err, stats) {
                if (err) {
                    return void handler(err, void 0, next);
                }

                var parsed = parseProofPath(path);
                handler(void 0, {
                    path: path,
                    blobId: parsed.blobId,
                    safeKey: parsed.safeKey,
                    atime: stats.atime,
                    ctime: stats.ctime,
                    mtime: stats.mtime,
                }, next);
            });
        }, function () {
            // called when there are no more directories or children to process
            cb();
        });

        dir.forEach(function (d) {
            // proof trees are rooted at 3-character safeKey prefixes;
            // ignore directories that aren't 3 characters long...
            if (d.length !== 3) { return; }
            walk(Path.join(root, d));
        });
    });
};
|||
|
|||
// Iterate over every blob under `root`, calling
// handler(err, { blobId, atime, ctime, mtime }, next) for each file,
// then cb() (or cb(err)) once the walk completes.
var listBlobs = function (root, handler, cb) {
    // iterate over files
    Fs.readdir(root, function (err, dir) {
        if (err) { return void cb(err); }
        var walk = makeWalker(20, function (err, path, next) {
            Fs.stat(path, function (err, stats) {
                if (err) {
                    return void handler(err, void 0, next);
                }

                handler(void 0, {
                    blobId: Path.basename(path),
                    atime: stats.atime,
                    ctime: stats.ctime,
                    mtime: stats.mtime,
                }, next);
            });
        }, function () {
            cb();
        });

        dir.forEach(function (d) {
            // blob trees are rooted at 2-character blobId prefixes
            if (d.length !== 2) { return; }
            walk(Path.join(root, d));
        });
    });
};
|||
|
|||
/*  Instantiate a BlobStore.
    'config' must provide a 'getSession' function; 'blobPath',
    'blobStagingPath' and 'archivePath' are optional and default to the
    usual locations. Creates the required directories, then calls back
    with (error, methods). Every public method validates its inputs and
    guarantees an asynchronous, at-most-once callback. */
BlobStore.create = function (config, _cb) {
    var cb = Util.once(Util.mkAsync(_cb));
    if (typeof(config.getSession) !== 'function') {
        return void cb("getSession method required");
    }

    var Env = {
        blobPath: config.blobPath || './blob',
        blobStagingPath: config.blobStagingPath || './blobstage',
        archivePath: config.archivePath || './data/archive',
        getSession: config.getSession,
    };

    nThen(function (w) {
        // report the first directory-creation error and stop construction
        var CB = Util.both(w.abort, cb);
        Fse.mkdirp(Env.blobPath, w(function (e) {
            if (e) { CB(e); }
        }));
        Fse.mkdirp(Env.blobStagingPath, w(function (e) {
            if (e) { CB(e); }
        }));

        Fse.mkdirp(Path.join(Env.archivePath, Env.blobPath), w(function (e) {
            if (e) { CB(e); }
        }));
    }).nThen(function () {
        var methods = {
            isFileId: isValidId,
            // check whether this user already has an upload in progress
            status: function (safeKey, _cb) {
                // TODO check if the final destination is a file
                // because otherwise two people can try to upload to the same location
                // and one will fail, invalidating their hard work
                var cb = Util.once(Util.mkAsync(_cb));
                if (!isValidSafeKey(safeKey)) { return void cb('INVALID_SAFEKEY'); }
                isFile(makeStagePath(Env, safeKey), cb);
            },
            // append a chunk of data to the user's staging file
            upload: function (safeKey, content, _cb) {
                var cb = Util.once(Util.mkAsync(_cb));
                if (!isValidSafeKey(safeKey)) { return void cb('INVALID_SAFEKEY'); }
                // 'cb' is already wrapped with once/mkAsync above;
                // wrapping it a second time here was redundant
                upload(Env, safeKey, content, cb);
            },

            // abandon an in-progress upload of the given size
            cancel: function (safeKey, fileSize, _cb) {
                var cb = Util.once(Util.mkAsync(_cb));
                if (!isValidSafeKey(safeKey)) { return void cb('INVALID_SAFEKEY'); }
                if (typeof(fileSize) !== 'number' || isNaN(fileSize) || fileSize <= 0) { return void cb("INVALID_FILESIZE"); }
                upload_cancel(Env, safeKey, fileSize, cb);
            },

            // check for an ownership proof linking safeKey to blobId
            isOwnedBy: function (safeKey, blobId, _cb) {
                var cb = Util.once(Util.mkAsync(_cb));
                if (!isValidSafeKey(safeKey)) { return void cb('INVALID_SAFEKEY'); }
                isOwnedBy(Env, safeKey, blobId, cb);
            },

            remove: {
                // permanently delete a blob
                blob: function (blobId, _cb) {
                    var cb = Util.once(Util.mkAsync(_cb));
                    if (!isValidId(blobId)) { return void cb("INVALID_ID"); }
                    remove(Env, blobId, cb);
                },
                // permanently delete an ownership proof
                proof: function (safeKey, blobId, _cb) {
                    var cb = Util.once(Util.mkAsync(_cb));
                    if (!isValidSafeKey(safeKey)) { return void cb('INVALID_SAFEKEY'); }
                    if (!isValidId(blobId)) { return void cb("INVALID_ID"); }
                    removeProof(Env, safeKey, blobId, cb);
                },
                // same operations, applied to the archive area
                archived: {
                    blob: function (blobId, _cb) {
                        var cb = Util.once(Util.mkAsync(_cb));
                        if (!isValidId(blobId)) { return void cb("INVALID_ID"); }
                        removeArchivedBlob(Env, blobId, cb);
                    },
                    proof: function (safeKey, blobId, _cb) {
                        var cb = Util.once(Util.mkAsync(_cb));
                        if (!isValidSafeKey(safeKey)) { return void cb('INVALID_SAFEKEY'); }
                        if (!isValidId(blobId)) { return void cb("INVALID_ID"); }
                        removeArchivedProof(Env, safeKey, blobId, cb);
                    },
                },
            },

            // move blobs/proofs into the archive area instead of deleting
            archive: {
                blob: function (blobId, _cb) {
                    var cb = Util.once(Util.mkAsync(_cb));
                    if (!isValidId(blobId)) { return void cb("INVALID_ID"); }
                    archiveBlob(Env, blobId, cb);
                },
                proof: function (safeKey, blobId, _cb) {
                    var cb = Util.once(Util.mkAsync(_cb));
                    if (!isValidSafeKey(safeKey)) { return void cb('INVALID_SAFEKEY'); }
                    if (!isValidId(blobId)) { return void cb("INVALID_ID"); }
                    archiveProof(Env, safeKey, blobId, cb);
                },
            },

            // move blobs/proofs back out of the archive area
            restore: {
                blob: function (blobId, _cb) {
                    var cb = Util.once(Util.mkAsync(_cb));
                    if (!isValidId(blobId)) { return void cb("INVALID_ID"); }
                    restoreBlob(Env, blobId, cb);
                },
                proof: function (safeKey, blobId, _cb) {
                    var cb = Util.once(Util.mkAsync(_cb));
                    if (!isValidSafeKey(safeKey)) { return void cb('INVALID_SAFEKEY'); }
                    if (!isValidId(blobId)) { return void cb("INVALID_ID"); }
                    restoreProof(Env, safeKey, blobId, cb);
                },
            },

            // finalize an upload: move the staged file to its destination
            complete: function (safeKey, id, _cb) {
                var cb = Util.once(Util.mkAsync(_cb));
                if (!isValidSafeKey(safeKey)) { return void cb('INVALID_SAFEKEY'); }
                if (!isValidId(id)) { return void cb("INVALID_ID"); }
                upload_complete(Env, safeKey, id, cb);
            },
            // finalize an owned upload (also writes an ownership proof)
            completeOwned: function (safeKey, id, _cb) {
                var cb = Util.once(Util.mkAsync(_cb));
                if (!isValidSafeKey(safeKey)) { return void cb('INVALID_SAFEKEY'); }
                if (!isValidId(id)) { return void cb("INVALID_ID"); }
                owned_upload_complete(Env, safeKey, id, cb);
            },
            // report the size on disk of a stored blob
            size: function (id, _cb) {
                var cb = Util.once(Util.mkAsync(_cb));
                if (!isValidId(id)) { return void cb("INVALID_ID"); }
                getUploadSize(Env, id, cb);
            },

            // iterate over stored blobs/proofs, live or archived
            list: {
                blobs: function (handler, _cb) {
                    var cb = Util.once(Util.mkAsync(_cb));
                    listBlobs(Env.blobPath, handler, cb);
                },
                proofs: function (handler, _cb) {
                    var cb = Util.once(Util.mkAsync(_cb));
                    listProofs(Env.blobPath, handler, cb);
                },
                archived: {
                    proofs: function (handler, _cb) {
                        var cb = Util.once(Util.mkAsync(_cb));
                        listProofs(prependArchive(Env, Env.blobPath), handler, cb);
                    },
                    blobs: function (handler, _cb) {
                        var cb = Util.once(Util.mkAsync(_cb));
                        listBlobs(prependArchive(Env, Env.blobPath), handler, cb);
                    },
                }
            },
        };

        cb(void 0, methods);
    });
};
|||
|
|||
1053
storage/file.js
File diff suppressed because it is too large
View File
File diff suppressed because it is too large
View File
@ -1,413 +0,0 @@ |
|||
var Fs = require("fs"); |
|||
var Fse = require("fs-extra"); |
|||
var Path = require("path"); |
|||
var nacl = require("tweetnacl/nacl-fast"); |
|||
var nThen = require("nthen"); |
|||
|
|||
var Tasks = module.exports; |
|||
|
|||
// Parse a JSON string, yielding null instead of throwing on bad input.
var tryParse = function (s) {
    var parsed = null;
    try {
        parsed = JSON.parse(s);
    } catch (err) {
        // malformed content is signalled by the null return value
    }
    return parsed;
};
|||
|
|||
// Serialize a task as [time, command, ...args].
// Returns null when time/command have the wrong type; a non-array 'args'
// is ignored and just [time, command] is returned.
var encode = function (time, command, args) {
    if (typeof(time) !== 'number' || typeof(command) !== 'string') {
        return null;
    }
    var head = [time, command];
    return Array.isArray(args) ? head.concat(args) : head;
};
|||
|
|||
/* |
|||
var randomId = function () { |
|||
var bytes = Array.prototype.slice.call(nacl.randomBytes(16)); |
|||
return bytes.map(function (b) { |
|||
var n = Number(b & 0xff).toString(16); |
|||
return n.length === 1? '0' + n: n; |
|||
}).join(''); |
|||
}; |
|||
|
|||
|
|||
var mkPath = function (env, id) { |
|||
return Path.join(env.root, id.slice(0, 2), id) + '.ndjson'; |
|||
}; |
|||
*/ |
|||
|
|||
// make a new folder every MODULUS ms
var MODULUS = 1000 * 60 * 60 * 24; // one day

// Round a timestamp down to the start of its MODULUS-sized window.
var moduloTime = function (d) {
    var remainder = d % MODULUS;
    return d - remainder;
};

// Directory names are simply the stringified window start time.
var makeDirectoryId = function (d) {
    return String(moduloTime(d));
};
|||
|
|||
// Persist one task to disk as newline-terminated JSON (".ndjson").
// Tasks are grouped into one directory per day (derived from task[0],
// the due time) and named by a hash of their own serialized content.
// Calls back with an error if directory creation or the write fails.
var write = function (env, task, cb) {
    var str = JSON.stringify(task) + '\n';
    // hash of the content, base64 with '/' made filesystem-safe
    // NOTE(review): relies on nacl.util, which newer tweetnacl releases
    // ship as the separate tweetnacl-util package — confirm it is
    // attached to 'nacl' at startup.
    var id = nacl.util.encodeBase64(nacl.hash(nacl.util.decodeUTF8(str))).replace(/\//g, '-');

    var dir = makeDirectoryId(task[0]);
    var path = Path.join(env.root, dir);

    nThen(function (w) {
        // create the parent directory if it does not exist
        Fse.mkdirp(path, 0x1ff, w(function (err) {
            if (err) {
                w.abort();
                return void cb(err);
            }
        }));
    }).nThen(function () {
        // write the file to the path
        var fullPath = Path.join(path, id + '.ndjson');

        // the file ids are based on the hash of the file contents to be written
        // as such, writing an exact task a second time will overwrite the first with the same contents
        // this shouldn't be a problem

        Fs.writeFile(fullPath, str, function (e) {
            if (e) {
                env.log.error("TASK_WRITE_FAILURE", {
                    error: e,
                    path: fullPath,
                });
                return void cb(e);
            }
            env.log.info("SUCCESSFUL_WRITE", {
                path: fullPath,
            });
            cb();
        });
    });
};
|||
|
|||
// Delete a task file from disk (used after a task has been run or
// migrated). 'env' is unused but kept for signature consistency with
// the other helpers in this module.
var remove = function (env, path, cb) {
    // FIXME COLDSTORAGE?
    Fs.unlink(path, cb);
};
|||
|
|||
// Remove a per-day task directory; callers only invoke this once they
// have observed the directory to be empty. Errors are left to the
// caller to log.
var removeDirectory = function (env, path, cb) {
    Fs.rmdir(path, cb);
};
|||
|
|||
// Walk the task storage tree and call back with a flat array of paths to
// every task file that might be due. Directories whose name encodes a
// time window entirely in the future are skipped; empty directories are
// opportunistically removed. When 'migration' is truthy, only old-format
// (two-character) directories are listed.
var list = Tasks.list = function (env, cb, migration) {
    var rootDirs;

    nThen(function (w) {
        // read the root directory
        Fs.readdir(env.root, w(function (e, list) {
            if (e) {
                env.log.error("TASK_ROOT_DIR", {
                    root: env.root,
                    error: e,
                });
                // abort so the next block doesn't run with 'rootDirs'
                // undefined (it would throw on .forEach)
                w.abort();
                return void cb(e);
            }
            if (list.length === 0) {
                w.abort();
                return void cb(void 0, []);
            }
            rootDirs = list;
        }));
    }).nThen(function () {
        // schedule the nested directories for exploration
        // return a list of paths to tasks
        var queue = nThen(function () {});

        var allPaths = [];

        // anything due before the end of the current day-window counts
        var currentWindow = moduloTime(+new Date() + MODULUS);

        // We prioritize a small footprint over speed, so we
        // iterate over directories in serial rather than parallel
        rootDirs.forEach(function (dir) {
            // if a directory is two characters, it's the old format
            // otherwise, it indicates when the file is set to expire
            // so we can ignore directories which are clearly in the future

            var dirTime;
            if (migration) {
                // this block handles migrations. ignore new formats
                if (dir.length !== 2) {
                    return;
                }
            } else {
                // not in migration mode, check if it's a new format
                if (dir.length >= 2) {
                    // might be the new format.
                    // check its time to see if it should be skipped
                    // (old 2-char hex names parse to NaN and fall through)
                    dirTime = parseInt(dir);
                    if (!isNaN(dirTime) && dirTime >= currentWindow) {
                        return;
                    }
                }
            }

            queue.nThen(function (w) {
                var subPath = Path.join(env.root, dir);
                Fs.readdir(subPath, w(function (e, paths) {
                    if (e) {
                        env.log.error("TASKS_INVALID_SUBDIR", {
                            path: subPath,
                            error: e,
                        });
                        return;
                    }

                    if (paths.length === 0) {
                        // clean up day-directories whose tasks have all run
                        removeDirectory(env, subPath, function (err) {
                            if (err) {
                                env.log.error('TASKS_REMOVE_EMPTY_DIRECTORY', {
                                    error: err,
                                    path: subPath,
                                });
                            }
                        });
                    }

                    // concat in place
                    Array.prototype.push.apply(allPaths, paths.map(function (p) {
                        return Path.join(subPath, p);
                    }));
                }));
            });
        });

        queue.nThen(function () {
            cb(void 0, allPaths);
        });
    });
};
|||
|
|||
// Read and parse one task file. Calls back with (error) on read/parse
// failure, otherwise (undefined, task) where task is the decoded
// [time, command, ...args] array.
var read = function (env, filePath, cb) {
    Fs.readFile(filePath, 'utf8', function (e, str) {
        if (e) { return void cb(e); }

        var task = tryParse(str);
        if (!Array.isArray(task) || task.length < 2) {
            // env.log is a logger object, not a function; calling it
            // directly (as before) would throw instead of logging
            env.log.error("INVALID_TASK", {
                path: filePath,
                task: task,
            });
            return cb(new Error('INVALID_TASK'));
        }
        cb(void 0, task);
    });
};
|||
|
|||
// Execute an 'EXPIRE' task ([time, command, channelId, ...]): the expired
// channel is archived when the server's retainData policy is set,
// otherwise it is deleted outright. Backend errors are logged but never
// propagated; cb is always invoked.
var expire = function (env, task, cb) {
    // TODO magic numbers, maybe turn task parsing into a function
    // and also maybe just encode tasks in a better format to start...
    var Log = env.log;
    var channel = task.slice(2)[0];

    if (env.retainData) {
        // archival mode: move the channel data aside instead of deleting
        Log.info('ARCHIVAL_SCHEDULED_EXPIRATION', {
            task: task,
        });
        env.store.archiveChannel(channel, function (err) {
            if (err) {
                Log.error('ARCHIVE_SCHEDULED_EXPIRATION_ERROR', {
                    task: task,
                    error: err,
                });
            }
            cb();
        });
        return;
    }

    // deletion mode
    Log.info('DELETION_SCHEDULED_EXPIRATION', {
        task: task,
    });
    env.store.removeChannel(channel, function (err) {
        if (err) {
            Log.error('DELETION_SCHEDULED_EXPIRATION_ERROR', {
                task: task,
                error: err,
            });
        }
        cb();
    });
};
|||
|
|||
// Read one task file, execute its command if its due time has passed,
// then delete the file. A task whose time is still in the future is
// left on disk untouched. Removal errors are logged, not fatal.
var run = Tasks.run = function (env, path, cb) {
    var CURRENT = +new Date();

    var Log = env.log;
    // (the unused 'args' local was removed; 'expire' re-derives the
    // arguments from the task itself)
    var task, time, command;

    nThen(function (w) {
        read(env, path, w(function (err, _task) {
            if (err) {
                w.abort();
                // there was a file but it wasn't valid?
                return void cb(err);
            }
            task = _task;
            time = task[0];

            // not due yet: leave it for a later pass
            if (time > CURRENT) {
                w.abort();
                return cb();
            }

            command = task[1];
        }));
    }).nThen(function (w) {
        switch (command) {
            case 'EXPIRE':
                return void expire(env, task, w());
            default:
                Log.warn("TASKS_UNKNOWN_COMMAND", task);
        }
    }).nThen(function () {
        // remove the task file...
        remove(env, path, function (err) {
            if (err) {
                Log.error('TASKS_RECORD_REMOVAL', {
                    path: path,
                    err: err,
                });
            }
            cb();
        });
    });
};
|||
|
|||
// List every candidate task and run them one at a time (serially, to
// keep the footprint small). 'env.running' is used as a simple mutex so
// overlapping sweeps call back immediately with "TASK_CONCURRENCY".
var runAll = function (env, cb) {
    // check if already running and bail out if so
    if (env.running) {
        return void cb("TASK_CONCURRENCY");
    }

    // if not, set a flag to block concurrency and proceed
    env.running = true;

    var paths;
    nThen(function (w) {
        list(env, w(function (err, _paths) {
            if (err) {
                w.abort();
                // release the lock before reporting the failure
                env.running = false;
                return void cb(err);
            }
            paths = _paths;
        }));
    }).nThen(function (w) {
        // hold the outer chain open until the serial inner chain finishes
        var done = w();
        var nt = nThen(function () {});
        paths.forEach(function (path) {
            nt = nt.nThen(function (w) {
                run(env, path, w(function (err) {
                    if (err) {
                        // Any errors are already logged in 'run'
                        // the admin will need to review the logs and clean up
                    }
                }));
            });
        });
        nt = nt.nThen(function () {
            done();
        });
    }).nThen(function (/*w*/) {
        env.running = false;
        cb();
    });
};
|||
|
|||
// Migrate tasks from the old on-disk layout (two-character directories)
// to the new per-day layout: each task is read, rewritten via 'write'
// (which derives the new location), then its old file is removed.
// Failures on a single task set 'bypass' so its later steps are skipped
// without stopping the overall migration.
var migrate = function (env, cb) {
    // list every task
    // (the trailing 'true' puts 'list' in migration mode)
    list(env, function (err, paths) {
        if (err) {
            return void cb(err);
        }
        var nt = nThen(function () {});
        paths.forEach(function (path) {
            var bypass;
            var task;

            nt = nt.nThen(function (w) {
                // read
                read(env, path, w(function (err, _task) {
                    if (err) {
                        bypass = true;
                        env.log.error("TASK_MIGRATION_READ", {
                            error: err,
                            path: path,
                        });
                        return;
                    }
                    task = _task;
                }));
            }).nThen(function (w) {
                if (bypass) { return; }
                // rewrite in new format
                write(env, task, w(function (err) {
                    if (err) {
                        bypass = true;
                        env.log.error("TASK_MIGRATION_WRITE", {
                            error: err,
                            task: task,
                        });
                    }
                }));
            }).nThen(function (w) {
                if (bypass) { return; }
                // remove
                remove(env, path, w(function (err) {
                    if (err) {
                        env.log.error("TASK_MIGRATION_REMOVE", {
                            error: err,
                            path: path,
                        });
                    }
                }));
            });
        });
        nt = nt.nThen(function () {
            cb();
        });
    }, true);
};
|||
|
|||
// Build the public tasks API. 'config' must provide 'store' (channel
// storage backend) and 'log' (logger); 'taskPath' and 'retainData' are
// optional. Calls back with (error, api) once the root directory exists.
Tasks.create = function (config, cb) {
    if (!config.store) { throw new Error("E_STORE_REQUIRED"); }
    if (!config.log) { throw new Error("E_LOG_REQUIRED"); }

    var env = {
        root: config.taskPath || './tasks',
        log: config.log,
        store: config.store,
        retainData: Boolean(config.retainData),
    };

    // make sure the path exists...
    Fse.mkdirp(env.root, 0x1ff, function (err) {
        if (err) { return void cb(err); }
        cb(void 0, {
            write: function (time, command, args, cb) {
                var task = encode(time, command, args);
                // encode returns null for invalid input; report it rather
                // than letting 'write' crash on task[0]
                if (task === null) { return void cb("INVALID_TASK"); }
                write(env, task, cb);
            },
            list: function (olderThan, cb) {
                // the internal 'list' takes (env, cb, migration);
                // previously 'olderThan' was passed in the callback
                // position, which broke the call entirely. 'olderThan'
                // is not supported by the implementation and is ignored.
                list(env, cb);
            },
            remove: function (id, cb) {
                remove(env, id, cb);
            },
            run: function (id, cb) {
                run(env, id, cb);
            },
            runAll: function (cb) {
                runAll(env, cb);
            },
            migrate: function (cb) {
                migrate(env, cb);
            },
        });
    });
};
|||
|
|||
@ -0,0 +1,750 @@ |
|||
define([ |
|||
'/common/common-util.js', |
|||
'/common/sframe-common-codemirror.js', |
|||
'/customize/messages.js', |
|||
'/bower_components/chainpad/chainpad.dist.js', |
|||
], function (Util, SFCodeMirror, Messages, ChainPad) { |
|||
var Markers = {}; |
|||
|
|||
/* TODO Known Issues |
|||
* 1. ChainPad diff is not completely accurate: we're not aware of the other user's cursor |
|||
position so if they insert an "a" in the middle of "aaaaa", the diff will think that |
|||
the "a" was inserted at the end of this sequence. This is not an issue for the content |
|||
but it will cause issues for the colors |
|||
2. ChainPad doesn't always provide the good result in case of conflict (?) |
|||
e.g. Alice is inserting "pew" at offset 10, Bob is removing 1 character at offset 10 |
|||
The expected result is to have "pew" and the following character deleted |
|||
In some cases, the result is "ew" inserted and the following character not deleted |
|||
*/ |
|||
|
|||
// Debug logging is a no-op by default; point this at a real logger when
// working on this file locally.
var debug = function () {};

// Opacity used for author-color mark backgrounds.
// NOTE(review): not referenced elsewhere in this chunk — presumably
// consumed by code outside this view; confirm before removing.
var MARK_OPACITY = 0.5;
// Initial author-mark state for a pad: no known authors, plus a single
// "unattributed" mark (uid -1) covering the entire document.
var DEFAULT = {
    authors: {},
    marks: [[-1, 0, 0, Number.MAX_SAFE_INTEGER, Number.MAX_SAFE_INTEGER]]
};
|||
|
|||
/*  Create a CodeMirror text marker for [from, to] attributed to author
    'uid'. uid -1 denotes unattributed content and gets a transparent
    marker with no tooltip; otherwise the author's color (at Env.opacity)
    is used as the background. Returns the TextMarker for real authors. */
var addMark = function (Env, from, to, uid) {
    if (!Env.enabled) { return; }
    var author = Env.authormarks.authors[uid] || {};

    // Unattributed content: transparent marker only.
    if (uid === -1) {
        return void Env.editor.markText(from, to, {
            css: "background-color: transparent",
            attributes: {
                'data-type': 'authormark',
                'data-uid': uid
            }
        });
    }

    uid = Number(uid);
    var mine = uid === Env.myAuthorId;
    var name = Util.fixHTML(author.name || Messages.anonymous);
    var col = Util.hexToRGB(author.color);
    var rgba = 'rgba(' + [col[0], col[1], col[2], Env.opacity].join(',') + ');';

    // My own marker must grow when I type at either edge, hence the
    // inclusive bounds for my uid only.
    return Env.editor.markText(from, to, {
        inclusiveLeft: mine,
        inclusiveRight: mine,
        css: "background-color: " + rgba,
        attributes: {
            title: Env.opacity ? Messages._getKey('cba_writtenBy', [name]) : '',
            'data-type': 'authormark',
            'data-uid': uid
        }
    });
};
|||
// Comparator for compact mark arrays: order by start line, then start
// offset. Non-array entries sort after array entries.
var sortMarks = function (a, b) {
    if (!Array.isArray(b)) { return -1; }
    if (!Array.isArray(a)) { return 1; }
    if (a[1] !== b[1]) {
        return a[1] < b[1] ? -1 : 1;
    }
    if (a[2] !== b[2]) {
        return a[2] < b[2] ? -1 : 1;
    }
    return 0;
};
|||
|
|||
/* Formats:
    [uid, startLine, startCh, endLine, endCh] (multi line)
    [uid, startLine, startCh, endCh] (single line)
    [uid, startLine, startCh] (single character)
*/
// Expand a compact mark array into an explicit
// {uid, startLine, startCh, endLine, endCh} object.
// Anything that is not an array yields an empty object.
var parseMark = Markers.parseMark = function (array) {
    if (!Array.isArray(array)) { return {}; }
    var hasEndLine = typeof(array[4]) !== "undefined";
    var hasEndCh = typeof(array[3]) !== "undefined";

    var endCh;
    if (!hasEndCh) {
        // single-character mark: ends one character after it starts
        endCh = array[2] + 1;
    } else if (hasEndLine) {
        endCh = array[4];
    } else {
        endCh = array[3];
    }

    return {
        uid: array[0],
        startLine: array[1],
        startCh: array[2],
        endLine: hasEndLine ? array[3] : array[1],
        endCh: endCh
    };
};
|||
|
|||
// Replace the cached author-mark data for this pad. Missing fields fall
// back to copies of DEFAULT; the previous value is remembered in
// Env.oldMarks (used by checkMarks to reconcile uncommitted changes).
// When the feature is disabled the cache is simply emptied.
var setAuthorMarks = function (Env, authormarks) {
    if (!Env.enabled) {
        Env.authormarks = {};
        return;
    }
    authormarks = authormarks || {};
    if (!authormarks.marks) { authormarks.marks = Util.clone(DEFAULT.marks); }
    if (!authormarks.authors) { authormarks.authors = Util.clone(DEFAULT.authors); }
    Env.oldMarks = Env.authormarks;
    Env.authormarks = authormarks;
};
|||
|
|||
// Accessor for the author-mark data currently cached on the environment.
var getAuthorMarks = function (Env) {
    return Env.authormarks;
};
|||
|
|||
// Rebuild Env.authormarks.marks from the markers currently present in
// the CodeMirror editor. Adjacent markers belonging to the same author
// are merged (left/right) into a single marker before being serialized
// into the compact array format (see parseMark).
var updateAuthorMarks = function (Env) {
    if (!Env.enabled) { return; }

    // get author marks
    var _marks = [];
    var all = [];

    var i = 0;
    Env.editor.getAllMarks().forEach(function (mark) {
        var pos = mark.find();
        var attributes = mark.attributes || {};
        // only consider our own author marks that still exist in the doc
        if (!pos || attributes['data-type'] !== 'authormark') { return; }


        var uid = Number(attributes['data-uid']) || 0;

        // try to merge this marker with any already-seen marker of the
        // same author that touches it
        all.forEach(function (obj) {
            if (obj.uid !== uid) { return; }
            if (obj.removed) { return; }
            // Merge left
            if (obj.pos.to.line === pos.from.line && obj.pos.to.ch === pos.from.ch) {
                obj.removed = true;
                // leave a hole; filtered out at the end
                _marks[obj.index] = undefined;
                obj.mark.clear();
                mark.clear();
                mark = addMark(Env, obj.pos.from, pos.to, uid);
                pos.from = obj.pos.from;
                return;
            }
            // Merge right
            if (obj.pos.from.line === pos.to.line && obj.pos.from.ch === pos.to.ch) {
                obj.removed = true;
                _marks[obj.index] = undefined;
                obj.mark.clear();
                mark.clear();
                mark = addMark(Env, pos.from, obj.pos.to, uid);
                pos.to = obj.pos.to;
            }
        });

        // serialize into the compact format
        var array = [uid, pos.from.line, pos.from.ch];
        if (pos.from.line === pos.to.line && pos.to.ch > (pos.from.ch+1)) {
            // If there is more than 1 character, add the "to" character
            array.push(pos.to.ch);
        } else if (pos.from.line !== pos.to.line) {
            // If the mark is on more than one line, add the "to" line data
            Array.prototype.push.apply(array, [pos.to.line, pos.to.ch]);
        }
        _marks.push(array);
        all.push({
            uid: uid,
            pos: pos,
            mark: mark,
            index: i
        });
        i++;
    });
    _marks.sort(sortMarks);
    debug('warn', _marks);
    // drop the holes left by merged markers
    Env.authormarks.marks = _marks.filter(Boolean);
};
|||
|
|||
// Fix all marks located after the given operation in the provided document
// 'op' is a ChainPad operation ({offset, toRemove, toInsert}) applied to
// 'doc'; 'marks' is the compact mark array, updated IN PLACE (entries may
// be set to undefined — the caller filters them out afterwards).
var fixMarksFromOp = function (Env, op, marks, doc) {
    var pos = SFCodeMirror.posToCursor(op.offset, doc); // pos of start offset
    var rPos = SFCodeMirror.posToCursor(op.offset + op.toRemove, doc); // end of removed content
    var removed = doc.slice(op.offset, op.offset + op.toRemove).split('\n'); // removed content
    var added = op.toInsert.split('\n'); // added content
    var posEndLine = pos.line + added.length - 1; // end line after op
    var posEndCh = added[added.length - 1].length; // end ch after op
    // net line/character deltas introduced by the operation
    var addLine = added.length - removed.length;
    var addCh = added[added.length - 1].length - removed[removed.length - 1].length;
    if (addLine > 0) { addCh -= pos.ch; }
    else if (addLine < 0) { addCh += pos.ch; }
    else { posEndCh += pos.ch; }

    // holds a "before the change" fragment if the op splits a mark in two
    var splitted;

    marks.forEach(function (mark, i) {
        if (!mark) { return; }
        var p = parseMark(mark);
        // Don't update marks located before the operation
        if (p.endLine < pos.line || (p.endLine === pos.line && p.endCh < pos.ch)) { return; }
        // Remove markers that have been deleted by my changes
        if ((p.startLine > pos.line || (p.startLine === pos.line && p.startCh >= pos.ch)) &&
            (p.endLine < rPos.line || (p.endLine === rPos.line && p.endCh <= rPos.ch))) {
            marks[i] = undefined;
            return;
        }
        // Update markers that have been cropped right
        if (p.endLine < rPos.line || (p.endLine === rPos.line && p.endCh <= rPos.ch)) {
            mark[3] = pos.line;
            mark[4] = pos.ch;
            return;
        }
        // Update markers that have been cropped left. This markers will be affected by
        // my toInsert so don't abort
        if (p.startLine < rPos.line || (p.startLine === rPos.line && p.startCh < rPos.ch)) {
            // If our change will split an existing mark, put the existing mark after the change
            // and create a new mark before
            if (p.startLine < pos.line || (p.startLine === pos.line && p.startCh < pos.ch)) {
                splitted = [mark[0], mark[1], mark[2], pos.line, pos.ch];
            }
            mark[1] = rPos.line;
            mark[2] = rPos.ch;
        }
        // Apply my toInsert the to remaining marks
        mark[1] += addLine;
        if (typeof(mark[4]) !== "undefined") { mark[3] += addLine; }

        if (mark[1] === posEndLine) {
            mark[2] += addCh;
            // single-line marks carry their end ch in slot 3,
            // multi-line marks in slot 4 (see parseMark)
            if (typeof(mark[4]) === "undefined" && typeof(mark[3]) !== "undefined") {
                mark[3] += addCh;
            } else if (typeof(mark[4]) !== "undefined" && mark[3] === posEndLine) {
                mark[4] += addCh;
            }
        }
    });
    // my insertion gets its own mark attributed to me
    if (op.toInsert.length) {
        marks.push([Env.myAuthorId, pos.line, pos.ch, posEndLine, posEndCh]);
    }
    if (splitted) {
        marks.push(splitted);
    }
    marks.sort(sortMarks);
};
|||
|
|||
// Remove marks added by OT and fix the incorrect ones
// first: data about the change with the lowest offset
// last: data about the change with the latest offset
// in the comments, "I" am "first"
// 'content' is the common-parent document; 'toKeepEnd' accumulates the
// reconciled compact marks (it is prepended to, in place).
var fixMarks = function (Env, first, last, content, toKeepEnd) {
    var toKeep = [];
    var toJoin = {};

    debug('error', "Fix marks");
    debug('warn', first);
    debug('warn', last);

    // Only split/keep across authors; two ops by the same author don't
    // need their mark lists partitioned.
    if (first.me !== last.me) {
        // Get their start position compared to the authDoc
        var lastAuthOffset = last.offset + last.total;
        var lastAuthPos = SFCodeMirror.posToCursor(lastAuthOffset, last.doc);
        // Get their start position compared to the localDoc
        var lastLocalOffset = last.offset + first.total;
        var lastLocalPos = SFCodeMirror.posToCursor(lastLocalOffset, first.doc);

        // Keep their changes in the marks (after their offset)
        last.marks.some(function (array, i) {
            var p = parseMark(array);
            // End of the mark before offset? ignore
            if (p.endLine < lastAuthPos.line) { return; }
            // Take everything from the first mark ending after the pos
            if (p.endLine > lastAuthPos.line || p.endCh >= lastAuthPos.ch) {
                toKeep = last.marks.slice(i);
                last.marks.splice(i);
                return true;
            }
        });
        // Keep my marks (based on currentDoc) before their changes
        first.marks.some(function (array, i) {
            var p = parseMark(array);
            // End of the mark before offset? ignore
            if (p.endLine < lastLocalPos.line) { return; }
            // Take everything from the first mark ending after the pos
            if (p.endLine > lastLocalPos.line || p.endCh >= lastLocalPos.ch) {
                first.marks.splice(i);
                return true;
            }
        });
    }

    // If we still have markers in "first", store the last one so that we can "join"
    // everything at the end
    if (first.marks.length) {
        var toJoinMark = first.marks[first.marks.length - 1].slice();
        toJoin = parseMark(toJoinMark);
    }


    // Add the new markers to the result
    Array.prototype.unshift.apply(toKeepEnd, toKeep);

    debug('warn', toJoin);
    debug('warn', toKeep);
    debug('warn', toKeepEnd);

    // Fix their offset: compute added lines and added characters on the last line
    // using the chainpad operation data (toInsert and toRemove)
    var pos = SFCodeMirror.posToCursor(first.offset, content);
    var removed = content.slice(first.offset, first.offset + first.toRemove).split('\n');
    var added = first.toInsert.split('\n');
    var posEndLine = pos.line + added.length - 1; // end line after op
    var addLine = added.length - removed.length;
    var addCh = added[added.length - 1].length - removed[removed.length - 1].length;
    if (addLine > 0) { addCh -= pos.ch; }
    if (addLine < 0) { addCh += pos.ch; }
    toKeepEnd.forEach(function (array) {
        // Push to correct lines
        array[1] += addLine;
        if (typeof(array[4]) !== "undefined") { array[3] += addLine; }
        // If they have markers on my end line, push their "ch"
        if (array[1] === posEndLine) {
            array[2] += addCh;
            // If they have no end line, it means end line === start line,
            // so we also push their end offset
            if (typeof(array[4]) === "undefined" && typeof(array[3]) !== "undefined") {
                array[3] += addCh;
            } else if (typeof(array[4]) !== "undefined" && array[3] === posEndLine) {
                array[4] += addCh;
            }
        }
    });

    if (toKeep.length && toJoin && typeof(toJoin.endLine) !== "undefined"
        && typeof(toJoin.endCh) !== "undefined") {
        // Make sure the marks are joined correctly:
        // fix the start position of the marks to keep
        // Note: we must preserve the same end for this mark if it was single line!
        if (typeof(toKeepEnd[0][4]) === "undefined") { // Single line
            toKeepEnd[0][4] = toKeepEnd[0][3] || (toKeepEnd[0][2]+1); // preserve end ch
            toKeepEnd[0][3] = toKeepEnd[0][1]; // preserve end line
        }
        toKeepEnd[0][1] = toJoin.endLine;
        toKeepEnd[0][2] = toJoin.endCh;
    }

    debug('log', 'Fixed');
    debug('warn', toKeepEnd);
};
|||
|
|||
// Reconcile the author marks after a remote update. Handles two cases:
// (1) switching to a branch created by us — replay only our own diffs
// over the authoritative marks; (2) uncommitted local edits at the time
// a remote patch arrives — interleave both sides' ChainPad operations
// (highest offset first) and repair the marks via fixMarks.
var checkMarks = function (Env, userDoc) {

    var chainpad = Env.framework._.cpNfInner.chainpad;
    var editor = Env.editor;
    var CodeMirror = Env.CodeMirror;

    // the feature is on iff the incoming document carries mark data
    Env.enabled = Boolean(userDoc.authormarks && userDoc.authormarks.marks);
    setAuthorMarks(Env, userDoc.authormarks);

    if (!Env.enabled) { return; }

    debug('error', 'Check marks');

    var authDoc = JSON.parse(chainpad.getAuthDoc() || '{}');
    if (!authDoc.content || !userDoc.content) { return; }

    var authPatch = chainpad.getAuthBlock();
    if (authPatch.isFromMe) {
        debug('log', 'Switch branch, from me');
        debug('log', authDoc.content);
        debug('log', authDoc.authormarks.marks);
        debug('log', userDoc.content);
        // We're switching to a different branch that was created by us.
        // We can't trust localDoc anymore because it contains data from the other branch
        // It means the only changes that we need to consider are ours.
        // Diff between userDoc and authDoc to see what we changed
        var _myOps = ChainPad.Diff.diff(authDoc.content, userDoc.content).reverse();
        var authormarks = Util.clone(authDoc.authormarks);
        _myOps.forEach(function (op) {
            fixMarksFromOp(Env, op, authormarks.marks, authDoc.content);
        });
        authormarks.marks = authormarks.marks.filter(Boolean);
        debug('log', 'Fixed marks');
        debug('warn', authormarks.marks);
        setAuthorMarks(Env, authormarks);
        return;
    }


    // marks as they were before this update (stashed by setAuthorMarks)
    var oldMarks = Env.oldMarks;


    if (authDoc.content === userDoc.content) { return; } // No uncommitted work

    if (!userDoc.authormarks || !Array.isArray(userDoc.authormarks.marks)) { return; }

    debug('warn', 'Begin...');

    var localDoc = CodeMirror.canonicalize(editor.getValue());

    // diff both sides against their common ancestor
    var commonParent = chainpad.getAuthBlock().getParent().getContent().doc;
    var content = JSON.parse(commonParent || '{}').content || '';

    var theirOps = ChainPad.Diff.diff(content, authDoc.content);
    var myOps = ChainPad.Diff.diff(content, localDoc);

    debug('log', theirOps);
    debug('log', myOps);

    if (!myOps.length || !theirOps.length) { return; }

    // If I have uncommited content when receiving a remote patch, all the operations
    // placed after someone else's changes will create marker issues. We have to fix it
    var sorted = [];

    var myTotal = 0;
    var theirTotal = 0;
    var parseOp = function (me) {
        return function (op) {
            // net size delta of this operation
            var size = (op.toInsert.length - op.toRemove);

            sorted.push({
                me: me,
                offset: op.offset,
                toInsert: op.toInsert,
                toRemove: op.toRemove,
                size: size,
                marks: (me ? (oldMarks && oldMarks.marks)
                           : (authDoc.authormarks && authDoc.authormarks.marks)) || [],
                doc: me ? localDoc : authDoc.content
            });

            if (me) { myTotal += size; }
            else { theirTotal += size; }
        };
    };
    myOps.forEach(parseOp(true));
    theirOps.forEach(parseOp(false));

    // Sort the operation in reverse order of offset
    // If an operation from them has the same offset than an operation from me, put mine first
    sorted.sort(function (a, b) {
        if (a.offset === b.offset) {
            return a.me ? -1 : 1;
        }
        return b.offset - a.offset;
    });

    debug('log', sorted);

    // We start from the end so that we don't have to fix the offsets everytime
    var prev;
    var toKeepEnd = [];
    sorted.forEach(function (op) {

        // Not the same author? fix!
        if (prev) {
            // Provide the new "totals"
            prev.total = prev.me ? myTotal : theirTotal;
            op.total = op.me ? myTotal : theirTotal;
            // Fix the markers
            fixMarks(Env, op, prev, content, toKeepEnd);
        }

        if (op.me) { myTotal -= op.size; }
        else { theirTotal -= op.size; }
        prev = op;
    });

    debug('log', toKeepEnd);

    // We now have all the markers located after the first operation (ordered by offset).
    // Prepend the markers placed before this operation
    var first = sorted[sorted.length - 1];
    if (first) { Array.prototype.unshift.apply(toKeepEnd, first.marks); }

    // Commit our new markers
    Env.authormarks.marks = toKeepEnd;

    debug('warn', toKeepEnd);
    debug('warn', '...End');
};
|||
|
|||
// Reset marks displayed in CodeMirror to the marks stored in Env
// Clears every existing author mark from the editor, then re-creates
// them from the compact arrays in Env.authormarks (see parseMark for
// the 3/4/5-element formats). Marks whose author is unknown are skipped
// (except the "unattributed" uid -1).
var setMarks = function (Env) {
    // on remote update: remove all marks, add new marks if colors are enabled
    Env.editor.getAllMarks().forEach(function (marker) {
        if (marker.attributes && marker.attributes['data-type'] === 'authormark') {
            marker.clear();
        }
    });

    if (!Env.enabled) { return; }

    debug('error', 'setMarks');
    debug('log', Env.authormarks.marks);

    var authormarks = Env.authormarks;
    authormarks.marks.forEach(function (mark) {
        var uid = mark[0];
        // skip marks whose author we have no metadata for
        if (uid !== -1 && (!authormarks.authors || !authormarks.authors[uid])) { return; }
        var from = {};
        var to = {};
        from.line = mark[1];
        from.ch = mark[2];
        if (mark.length === 3) {
            // single character
            to.line = mark[1];
            to.ch = mark[2]+1;
        } else if (mark.length === 4) {
            // single line
            to.line = mark[1];
            to.ch = mark[3];
        } else if (mark.length === 5) {
            // multi line
            to.line = mark[3];
            to.ch = mark[4];
        }

        // Remove marks that are placed under this one
        try {
            Env.editor.findMarks(from, to).forEach(function (mark) {
                if (!mark || !mark.attributes || mark.attributes['data-type'] !== 'authormark') { return; }
                mark.clear();
            });
        } catch (e) {
            // findMarks can throw on out-of-range positions; log and
            // continue so one bad mark doesn't break the whole redraw
            console.warn(mark, JSON.stringify(authormarks.marks));
            console.error(from, to);
            console.error(e);
        }

        addMark(Env, from, to, uid);
    });
};
|||
|
|||
// Store my own author data (name, public key, color) in the pad's author
// map. Returns true only when a pre-existing entry was visibly modified
// (name or color changed); otherwise returns undefined.
var setMyData = function (Env) {
    if (!Env.enabled) { return; }

    var me = Env.common.getMetadataMgr().getUserData();
    var previous = Env.authormarks.authors[Env.myAuthorId];
    Env.authormarks.authors[Env.myAuthorId] = {
        name: me.name,
        curvePublic: me.curvePublic,
        color: me.color
    };

    // No previous entry, or nothing visible changed: report "unchanged"
    if (!previous) { return; }
    if (previous.name === me.name && previous.color === me.color) { return; }
    return true;
};
|||
|
|||
// Handle a local CodeMirror change: wrap freshly typed or pasted text in an
// author mark belonging to the local user, splitting another author's mark
// when the insertion lands inside it.
// `change` is a CodeMirror change object ({from, to, text, origin});
// `cb` (optional) is invoked once the marks have been updated.
var localChange = function (Env, change, cb) {
    cb = cb || function () {};

    if (!Env.enabled) { return void cb(); }

    debug('error', 'Local change');
    debug('log', change, true);

    if (change.origin === "setValue") {
        // If the content is changed from a remote patch, we call localChange
        // in "onContentUpdate" directly
        // NOTE: cb is intentionally not called on this path
        return;
    }
    // Only direct typing and pastes produce newly authored text
    if (change.text === undefined || ['+input', 'paste'].indexOf(change.origin) === -1) {
        return void cb();
    }

    // add new author mark if text is added. marks from removed text are removed automatically

    // change.to is not always correct, fix it!
    // Recompute the end of the inserted text from change.from and the
    // inserted lines (change.to describes the *replaced* range instead).
    var to_add = {
        line: change.from.line + change.text.length-1,
    };
    if (change.text.length > 1) {
        // Multiple lines => take the length of the text added to the last line
        to_add.ch = change.text[change.text.length-1].length;
    } else {
        // Single line => use the "from" position and add the length of the text
        to_add.ch = change.from.ch + change.text[change.text.length-1].length;
    }

    // If my text is inside an existing mark:
    // * if it's my mark, do nothing
    // * if it's someone else's mark, break it
    // We can only have one author mark at a given position, but there may be
    // another mark (cursor selection...) at this position so we use ".some"
    var toSplit, abort;

    Env.editor.findMarks(change.from, to_add).some(function (mark) {
        if (!mark.attributes) { return; }
        if (mark.attributes['data-type'] !== 'authormark') { return; }
        if (mark.attributes['data-uid'] !== Env.myAuthorId) {
            // Someone else's mark: remember it so it can be split below
            toSplit = {
                mark: mark,
                uid: mark.attributes['data-uid']
            };
        } else {
            // This is our mark: abort to avoid making a new one
            abort = true;
        }
        // Stop at the first author mark found (only one can cover a position)
        return true;
    });
    if (abort) { return void cb(); }

    // Add my data to the doc if it's missing
    if (!Env.authormarks.authors[Env.myAuthorId]) {
        setMyData(Env);
    }

    if (toSplit && toSplit.mark && typeof(toSplit.uid) !== "undefined") {
        // Break the other user's mark if needed
        var _pos = toSplit.mark.find();
        toSplit.mark.clear();
        addMark(Env, _pos.from, change.from, toSplit.uid); // their mark, 1st part
        addMark(Env, change.from, to_add, Env.myAuthorId); // my mark
        addMark(Env, to_add, _pos.to, toSplit.uid); // their mark, 2nd part
    } else {
        // Add my mark
        addMark(Env, change.from, to_add, Env.myAuthorId);
    }

    cb();
};
|||
|
|||
// Wire up the toolbar button that toggles author-color visibility.
var setButton = function (Env, $button) {
    // Update the tooltip, preferring the tippy instance when one is attached
    var setTitle = function (text) {
        var tippy = $button[0] && $button[0]._tippy;
        if (tippy) { tippy.title = text; }
        else { $button.attr('title', text); }
    };
    var toggle = function () {
        if (Env.opacity) {
            // Colors currently shown: hide them
            Env.opacity = 0;
            setTitle(Messages.cba_show);
            $button.removeClass("cp-toolbar-button-active");
            return;
        }
        // Colors currently hidden: show them
        Env.opacity = MARK_OPACITY;
        setTitle(Messages.cba_hide);
        $button.addClass("cp-toolbar-button-active");
    };
    toggle();
    Env.$button = $button;
    $button.click(function () {
        toggle();
        setMarks(Env);
    });
};
|||
|
|||
// Generate a numeric author id (0 <= n < 1000000) that is not already
// present in `existing`. Gives up after 1000 attempts and falls back to 0.
var authorUid = function (existing) {
    if (!Array.isArray(existing)) { existing = []; }
    var n;
    var i = 0;
    // Retry until we draw an unused id. The cap on `i` guards against a
    // (pathological) endless loop when nearly every id is taken.
    // NOTE: parentheses are required here — without them the `&&` binds
    // tighter than `||` and the iteration cap never applies to the `!n` case.
    while ((!n || existing.indexOf(n) !== -1) && i++ < 1000) {
        n = Math.floor(Math.random() * 1000000);
    }
    // If we can't find a valid number in 1000 iterations, use 0...
    if (existing.indexOf(n) !== -1) { n = 0; }
    return n;
};
|||
// Find my numeric author id in the pad: reuse the id already bound to my
// curvePublic key when logged in, otherwise (or when no match exists)
// generate a fresh unused id.
var getAuthorId = function (Env) {
    var existing = Object.keys(Env.authormarks.authors || {}).map(Number);
    // Anonymous users can't be matched across sessions: always a new id
    if (!Env.common.isLoggedIn()) { return authorUid(existing); }

    var userData = Env.common.getMetadataMgr().getUserData();
    var uid;
    existing.some(function (id) {
        var author = Env.authormarks.authors[id] || {};
        if (author.curvePublic !== userData.curvePublic) { return; }
        uid = Number(id);
        return true;
    });
    // 0 is a valid author id (authorUid falls back to it), so test for
    // undefined explicitly instead of relying on truthiness
    return typeof(uid) !== "undefined" ? uid : authorUid(existing);
};
|||
// Called once the pad is fully loaded: pick my author id and paint marks.
var ready = function (Env) {
    Env.ready = true;
    Env.myAuthorId = getAuthorId(Env);

    if (!Env.enabled) { return; }

    if (Env.$button) { Env.$button.show(); }

    // An empty/reset pad starts from the default marker set
    var marks = Env.authormarks.marks;
    if (!marks || !marks.length) {
        Env.authormarks = Util.clone(DEFAULT);
    }
    setMarks(Env);
};
|||
|
|||
// The pad has author colors enabled iff it stores an authormarks list.
var getState = function (Env) {
    return !!(Env.authormarks && Env.authormarks.marks);
};
|||
// Enable or disable the author-colors feature for this pad, syncing Env with
// the state stored in the document. No-op until the pad is ready or when the
// requested state matches the current one.
var setState = function (Env, enabled) {
    // If the state has changed in the pad, change the Env too
    if (!Env.ready) { return; }
    if (Env.enabled === enabled) { return; }
    Env.enabled = enabled;
    if (!Env.enabled) {
        // Reset marks
        Env.authormarks = {};
        setMarks(Env);
        if (Env.$button) { Env.$button.hide(); }
    } else {
        Env.myAuthorId = getAuthorId(Env);
        // If it's a reset, add initial marker
        if (!Env.authormarks.marks || !Env.authormarks.marks.length) {
            Env.authormarks = Util.clone(DEFAULT);
            setMarks(Env);
        }
        if (Env.$button) { Env.$button.show(); }
    }
    // Push the new state into the document (Env.ready is known true here)
    if (Env.ready) { Env.framework.localChange(); }
};
|||
|
|||
// Build the Markers API for one pad. `config` becomes the shared Env; it is
// expected to provide `common`, `framework`, `editor` and optionally
// `devMode` (assumption based on the fields read here — TODO confirm against
// callers).
Markers.create = function (config) {
    var Env = config;
    Env.authormarks = {};
    Env.enabled = false;
    Env.myAuthorId = 0;

    if (Env.devMode) {
        // In dev mode, route debug output to the matching console method.
        debug = function (level, obj, logObject) {
            var f = console.log;
            if (typeof(console[level]) === "function") {
                f = console[level];
            }
            if (logObject) { return void f(obj); }
            // FIX: previously nothing was logged when `logObject` was falsy,
            // making every two-argument debug() call a silent no-op (and
            // leaving the `f` lookup above dead code on that path).
            // Serialize so the output is a snapshot, not a live reference.
            f(JSON.stringify(obj));
        };
    }

    var metadataMgr = Env.common.getMetadataMgr();
    metadataMgr.onChange(function () {
        // If the markers are disabled or if I haven't pushed content since the last reset,
        // don't update my data
        if (!Env.enabled || !Env.myAuthorId || !Env.authormarks.authors ||
            !Env.authormarks.authors[Env.myAuthorId]) {
            return;
        }

        // Update my data; repaint and persist only when it visibly changed
        var changed = setMyData(Env);
        if (changed) {
            setMarks(Env);
            Env.framework.localChange();
        }
    });

    // Wrap an internal function so that it always receives Env as its first
    // argument and never lets an exception escape to the caller.
    var call = function (f) {
        return function () {
            try {
                [].unshift.call(arguments, Env);
                return f.apply(null, arguments);
            } catch (e) {
                console.error(e);
            }
        };
    };

    return {
        addMark: call(addMark),
        getAuthorMarks: call(getAuthorMarks),
        updateAuthorMarks: call(updateAuthorMarks),
        checkMarks: call(checkMarks),
        setMarks: call(setMarks),
        localChange: call(localChange),
        ready: call(ready),
        setButton: call(setButton),
        getState: call(getState),
        setState: call(setState),
    };
};
|||
|
|||
return Markers; |
|||
}); |
|||
@ -0,0 +1,4 @@ |
|||
.sectionTitle, .titleText { |
|||
font-weight: bold; |
|||
} |
|||
|
|||
59134
www/code/mermaid.js
File diff suppressed because it is too large
View File
File diff suppressed because it is too large
View File
56
www/code/mermaid.min.js
File diff suppressed because it is too large
View File
File diff suppressed because it is too large
View File
1916
www/common/common-ui-elements.js
File diff suppressed because it is too large
View File
File diff suppressed because it is too large
View File
Some files were not shown because too many files changed in this diff
Write
Preview
Loading…
Cancel
Save