93 changed files with 5569 additions and 3758 deletions
39    CHANGELOG.md
21    config/config.example.js
2     customize.dist/pages.js
6     customize.dist/src/less2/include/buttons.less
2     customize.dist/src/less2/include/colortheme.less
113   customize.dist/src/less2/include/corner.less
7     customize.dist/src/less2/include/fileupload.less
9     customize.dist/src/less2/include/notifications.less
26    customize.dist/src/less2/include/sidebar-layout.less
1     customize.dist/src/less2/include/variables.less
2     docs/example.nginx.conf
1021  historyKeeper.js
48    lib/api.js
122   lib/commands/admin-rpc.js
172   lib/commands/block.js
274   lib/commands/channel.js
190   lib/commands/core.js
128   lib/commands/metadata.js
464   lib/commands/pin-rpc.js
107   lib/commands/quota.js
57    lib/commands/upload.js
97    lib/historyKeeper.js
929   lib/hk-util.js
17    lib/metadata.js
306   lib/rpc.js
172   lib/schedule.js
99    package-lock.json
7     package.json
1766  rpc.js
50    scripts/evict-inactive.js
19    scripts/tests/test-rpc.js
220   scripts/tests/test-scheduler.js
115   server.js
363   storage/file.js
17    storage/tasks.js
24    www/admin/app-admin.less
161   www/common/common-hash.js
91    www/common/common-interface.js
12    www/common/common-messaging.js
239   www/common/common-ui-elements.js
3     www/common/common-util.js
54    www/common/cryptpad-common.js
1     www/common/diffMarked.js
178   www/common/drive-ui.js
18    www/common/notifications.js
9     www/common/onlyoffice/app-oo.less
141   www/common/onlyoffice/inner.js
10    www/common/onlyoffice/main.js
116   www/common/outer/async-store.js
114   www/common/outer/mailbox-handlers.js
14    www/common/outer/mailbox.js
2     www/common/outer/messenger.js
23    www/common/outer/onlyoffice.js
1     www/common/outer/store-rpc.js
20    www/common/outer/team.js
16    www/common/outer/userObject.js
12    www/common/pinpad.js
43    www/common/proxy-manager.js
4     www/common/sframe-app-framework.js
11    www/common/sframe-app-outer.js
9     www/common/sframe-chainpad-netflux-inner.js
7     www/common/sframe-common-codemirror.js
22    www/common/sframe-common-file.js
19    www/common/sframe-common-mailbox.js
174   www/common/sframe-common-outer.js
15    www/common/sframe-common.js
95    www/common/translations/messages.ca.json
67    www/common/translations/messages.de.json
1     www/common/translations/messages.el.json
1     www/common/translations/messages.es.json
202   www/common/translations/messages.fi.json
57    www/common/translations/messages.fr.json
43    www/common/translations/messages.it.json
59    www/common/translations/messages.json
1     www/common/translations/messages.nb.json
1     www/common/translations/messages.pt-br.json
1     www/common/translations/messages.ro.json
2     www/common/translations/messages.ru.json
10    www/drive/inner.js
38    www/drive/main.js
15    www/file/main.js
9     www/notifications/app-notifications.less
8     www/notifications/inner.js
2     www/pad/inner.js
6     www/poll/inner.js
11    www/poll/main.js
25    www/profile/inner.js
11    www/settings/app-settings.less
79    www/settings/inner.js
7     www/support/ui.js
6     www/teams/app-team.less
4     www/teams/inner.js
15    www/teams/main.js
historyKeeper.js (1021 changes): file diff suppressed because it is too large.
lib/api.js
@@ -0,0 +1,48 @@
/* jshint esversion: 6 */
const WebSocketServer = require('ws').Server;
const NetfluxSrv = require('chainpad-server');

module.exports.create = function (config) {
    // asynchronously create a historyKeeper and RPC together
    require('./historyKeeper.js').create(config, function (err, historyKeeper) {
        if (err) { throw err; }

        var log = config.log;

        // spawn ws server and attach netflux event handlers
        NetfluxSrv.create(new WebSocketServer({ server: config.httpServer}))
            .on('channelClose', historyKeeper.channelClose)
            .on('channelMessage', historyKeeper.channelMessage)
            .on('channelOpen', historyKeeper.channelOpen)
            .on('sessionClose', function (userId, reason) {
                if (['BAD_MESSAGE', 'SOCKET_ERROR', 'SEND_MESSAGE_FAIL_2'].indexOf(reason) !== -1) {
                    if (reason && reason.code === 'ECONNRESET') { return; }
                    return void log.error('SESSION_CLOSE_WITH_ERROR', {
                        userId: userId,
                        reason: reason,
                    });
                }

                if (reason && reason === 'SOCKET_CLOSED') { return; }
                log.verbose('SESSION_CLOSE_ROUTINE', {
                    userId: userId,
                    reason: reason,
                });
            })
            .on('error', function (error, label, info) {
                if (!error) { return; }
                /* labels:
                    SEND_MESSAGE_FAIL, SEND_MESSAGE_FAIL_2, FAIL_TO_DISCONNECT,
                    FAIL_TO_TERMINATE, HANDLE_CHANNEL_LEAVE, NETFLUX_BAD_MESSAGE,
                    NETFLUX_WEBSOCKET_ERROR
                */
                log.error(label, {
                    code: error.code,
                    message: error.message,
                    stack: error.stack,
                    info: info,
                });
            })
            .register(historyKeeper.id, historyKeeper.directMessage);
    });
};
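For context, here is a minimal sketch of how a server entry point might call this module. Only httpServer and log are read directly by lib/api.js above; the logger shape and any further config fields (storage paths and other settings consumed later by historyKeeper and rpc) are assumptions, not part of this patch.

const Http = require('http');
const Api = require('./lib/api');

// assumed logger shape: the modules above call log.error, log.verbose and log.silly
const log = {
    error: console.error,
    warn: console.warn,
    info: console.log,
    verbose: function () {},
    silly: function () {},
};

const httpServer = Http.createServer(function (req, res) {
    res.end('ok'); // static file serving omitted
});

httpServer.listen(3000, function () {
    Api.create({
        httpServer: httpServer,
        log: log,
        // ...plus the storage and quota configuration expected by historyKeeper and rpc
    });
});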
lib/commands/admin-rpc.js
@@ -0,0 +1,122 @@
/*jshint esversion: 6 */
const BatchRead = require("../batch-read");
const nThen = require("nthen");
const getFolderSize = require("get-folder-size");
//const Util = require("../common-util");

var Fs = require("fs");

var Admin = module.exports;

var getActiveSessions = function (Env, Server, cb) {
    var stats = Server.getSessionStats();
    cb(void 0, [
        stats.total,
        stats.unique
    ]);
};

var shutdown = function (Env, Server, cb) {
    if (true) {
        return void cb('E_NOT_IMPLEMENTED');
    }

    // disconnect all users and reject new connections
    Server.shutdown();

    // stop all intervals that may be running
    Object.keys(Env.intervals).forEach(function (name) {
        clearInterval(Env.intervals[name]);
    });

    // set a flag to prevent incoming database writes
    // wait until all pending writes are complete
    // then process.exit(0);
    // and allow system functionality to restart the server
};

const batchRegisteredUsers = BatchRead("GET_REGISTERED_USERS");
var getRegisteredUsers = function (Env, cb) {
    batchRegisteredUsers('', cb, function (done) {
        var dir = Env.paths.pin;
        var folders;
        var users = 0;
        nThen(function (waitFor) {
            Fs.readdir(dir, waitFor(function (err, list) {
                if (err) {
                    waitFor.abort();
                    return void done(err);
                }
                folders = list;
            }));
        }).nThen(function (waitFor) {
            folders.forEach(function (f) {
                var dir = Env.paths.pin + '/' + f;
                Fs.readdir(dir, waitFor(function (err, list) {
                    if (err) { return; }
                    users += list.length;
                }));
            });
        }).nThen(function () {
            done(void 0, users);
        });
    });
};

const batchDiskUsage = BatchRead("GET_DISK_USAGE");
var getDiskUsage = function (Env, cb) {
    batchDiskUsage('', cb, function (done) {
        var data = {};
        nThen(function (waitFor) {
            getFolderSize('./', waitFor(function(err, info) {
                data.total = info;
            }));
            getFolderSize(Env.paths.pin, waitFor(function(err, info) {
                data.pin = info;
            }));
            getFolderSize(Env.paths.blob, waitFor(function(err, info) {
                data.blob = info;
            }));
            getFolderSize(Env.paths.staging, waitFor(function(err, info) {
                data.blobstage = info;
            }));
            getFolderSize(Env.paths.block, waitFor(function(err, info) {
                data.block = info;
            }));
            getFolderSize(Env.paths.data, waitFor(function(err, info) {
                data.datastore = info;
            }));
        }).nThen(function () {
            done(void 0, data);
        });
    });
};

Admin.command = function (Env, safeKey, data, cb, Server) {
    var admins = Env.admins;
    //var unsafeKey = Util.unescapeKeyCharacters(safeKey);
    if (admins.indexOf(safeKey) === -1) {
        return void cb("FORBIDDEN");
    }

    // Handle commands here
    switch (data[0]) {
        case 'ACTIVE_SESSIONS':
            return getActiveSessions(Env, Server, cb);
        case 'ACTIVE_PADS':
            return cb(void 0, Server.getActiveChannelCount());
        case 'REGISTERED_USERS':
            return getRegisteredUsers(Env, cb);
        case 'DISK_USAGE':
            return getDiskUsage(Env, cb);
        case 'FLUSH_CACHE':
            Env.flushCache();
            return cb(void 0, true);
        case 'SHUTDOWN':
            return shutdown(Env, Server, cb);
        default:
            return cb('UNHANDLED_ADMIN_COMMAND');
    }
};
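A hedged sketch of exercising the dispatcher above directly, for example from a test. The Env and Server objects are reduced to the fields this file actually touches; the key string and session counts are placeholders.

const Admin = require('./lib/commands/admin-rpc');

const Env = {
    admins: ['admin-public-key'],        // hypothetical admin key, already escaped
    paths: { pin: './data/pins' },
    flushCache: function () {},
};
const Server = {
    getSessionStats: function () { return { total: 3, unique: 2 }; },
    getActiveChannelCount: function () { return 1; },
};

Admin.command(Env, 'admin-public-key', ['ACTIVE_SESSIONS'], function (err, result) {
    console.log(err, result); // expected: undefined [ 3, 2 ]
}, Server);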
lib/commands/block.js
@@ -0,0 +1,172 @@
/*jshint esversion: 6 */
/* globals Buffer*/
var Block = module.exports;

const Fs = require("fs");
const Fse = require("fs-extra");
const Path = require("path");
const Nacl = require("tweetnacl/nacl-fast");
const nThen = require("nthen");

const Util = require("../common-util");

/*
    We assume that the server is secured against MitM attacks
    via HTTPS, and that malicious actors do not have code execution
    capabilities. If they do, we have much more serious problems.

    The capability to replay a block write or remove results in either
    a denial of service for the user whose block was removed, or in the
    case of a write, a rollback to an earlier password.

    Since block modification is destructive, this can result in loss
    of access to the user's drive.

    So long as the detached signature is never observed by a malicious
    party, and the server discards it after proof of knowledge, replays
    are not possible. However, this precludes verification of the signature
    at a later time.

    Despite this, an integrity check is still possible by the original
    author of the block, since we assume that the block will have been
    encrypted with xsalsa20-poly1305 which is authenticated.
*/
var validateLoginBlock = function (Env, publicKey, signature, block, cb) { // FIXME BLOCKS
    // convert the public key to a Uint8Array and validate it
    if (typeof(publicKey) !== 'string') { return void cb('E_INVALID_KEY'); }

    var u8_public_key;
    try {
        u8_public_key = Nacl.util.decodeBase64(publicKey);
    } catch (e) {
        return void cb('E_INVALID_KEY');
    }

    var u8_signature;
    try {
        u8_signature = Nacl.util.decodeBase64(signature);
    } catch (e) {
        Env.Log.error('INVALID_BLOCK_SIGNATURE', e);
        return void cb('E_INVALID_SIGNATURE');
    }

    // convert the block to a Uint8Array
    var u8_block;
    try {
        u8_block = Nacl.util.decodeBase64(block);
    } catch (e) {
        return void cb('E_INVALID_BLOCK');
    }

    // take its hash
    var hash = Nacl.hash(u8_block);

    // validate the signature against the hash of the content
    var verified = Nacl.sign.detached.verify(hash, u8_signature, u8_public_key);

    // existing authentication ensures that users cannot replay old blocks

    // call back with (err) if unsuccessful
    if (!verified) { return void cb("E_COULD_NOT_VERIFY"); }

    return void cb(null, u8_block);
};

var createLoginBlockPath = function (Env, publicKey) { // FIXME BLOCKS
    // prepare publicKey to be used as a file name
    var safeKey = Util.escapeKeyCharacters(publicKey);

    // validate safeKey
    if (typeof(safeKey) !== 'string') {
        return;
    }

    // derive the full path
    // /home/cryptpad/cryptpad/block/fg/fg32kefksjdgjkewrjksdfksjdfsdfskdjfsfd
    return Path.join(Env.paths.block, safeKey.slice(0, 2), safeKey);
};

Block.writeLoginBlock = function (Env, safeKey, msg, cb) { // FIXME BLOCKS
    //console.log(msg);
    var publicKey = msg[0];
    var signature = msg[1];
    var block = msg[2];

    validateLoginBlock(Env, publicKey, signature, block, function (e, validatedBlock) {
        if (e) { return void cb(e); }
        if (!(validatedBlock instanceof Uint8Array)) { return void cb('E_INVALID_BLOCK'); }

        // derive the filepath
        var path = createLoginBlockPath(Env, publicKey);

        // make sure the path is valid
        if (typeof(path) !== 'string') {
            return void cb('E_INVALID_BLOCK_PATH');
        }

        var parsed = Path.parse(path);
        if (!parsed || typeof(parsed.dir) !== 'string') {
            return void cb("E_INVALID_BLOCK_PATH_2");
        }

        nThen(function (w) {
            // make sure the path to the file exists
            Fse.mkdirp(parsed.dir, w(function (e) {
                if (e) {
                    w.abort();
                    cb(e);
                }
            }));
        }).nThen(function () {
            // actually write the block

            // flow is dumb and I need to guard against this which will never happen
            /*:: if (typeof(validatedBlock) === 'undefined') { throw new Error('should never happen'); } */
            /*:: if (typeof(path) === 'undefined') { throw new Error('should never happen'); } */
            Fs.writeFile(path, Buffer.from(validatedBlock), { encoding: "binary", }, function (err) {
                if (err) { return void cb(err); }
                cb();
            });
        });
    });
};

/*
    When users write a block, they upload the block, and provide
    a signature proving that they deserve to be able to write to
    the location determined by the public key.

    When removing a block, there is nothing to upload, but we need
    to sign something. Since the signature is considered sensitive
    information, we can just sign some constant and use that as proof.
*/
Block.removeLoginBlock = function (Env, safeKey, msg, cb) { // FIXME BLOCKS
    var publicKey = msg[0];
    var signature = msg[1];
    var block = Nacl.util.decodeUTF8('DELETE_BLOCK'); // clients and the server will have to agree on this constant

    validateLoginBlock(Env, publicKey, signature, block, function (e /*::, validatedBlock */) {
        if (e) { return void cb(e); }
        // derive the filepath
        var path = createLoginBlockPath(Env, publicKey);

        // make sure the path is valid
        if (typeof(path) !== 'string') {
            return void cb('E_INVALID_BLOCK_PATH');
        }

        // FIXME COLDSTORAGE
        Fs.unlink(path, function (err) {
            Env.Log.info('DELETION_BLOCK_BY_OWNER_RPC', {
                publicKey: publicKey,
                path: path,
                status: err? String(err): 'SUCCESS',
            });

            if (err) { return void cb(err); }
            cb();
        });
    });
};
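The comments above describe the client's side of this exchange without showing it. The following is a hedged sketch of how a matching msg could be produced: sign the SHA-512 hash of the already-encrypted block with a detached Ed25519 signature and base64-encode all three parts. It assumes the same tweetnacl build used above (where Nacl.util is available) and is not taken from CryptPad's client code.

const Nacl = require('tweetnacl/nacl-fast'); // assumes Nacl.util is present, as in the file above

const keys = Nacl.sign.keyPair();       // hypothetical login keypair
const block = Nacl.randomBytes(128);    // stand-in for the xsalsa20-poly1305 ciphertext

const signature = Nacl.sign.detached(Nacl.hash(block), keys.secretKey);

const msg = [
    Nacl.util.encodeBase64(keys.publicKey), // msg[0]: public key
    Nacl.util.encodeBase64(signature),      // msg[1]: detached signature over the block's hash
    Nacl.util.encodeBase64(block),          // msg[2]: the block itself
];
// Block.writeLoginBlock(Env, safeKey, msg, cb) above verifies exactly this structure.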
lib/commands/channel.js
@@ -0,0 +1,274 @@
/*jshint esversion: 6 */
const Channel = module.exports;

const Util = require("../common-util");
const nThen = require("nthen");
const Core = require("./core");
const Metadata = require("./metadata");

Channel.clearOwnedChannel = function (Env, safeKey, channelId, cb, Server) {
    if (typeof(channelId) !== 'string' || channelId.length !== 32) {
        return cb('INVALID_ARGUMENTS');
    }
    var unsafeKey = Util.unescapeKeyCharacters(safeKey);

    Metadata.getMetadata(Env, channelId, function (err, metadata) {
        if (err) { return void cb(err); }
        if (!Core.hasOwners(metadata)) { return void cb('E_NO_OWNERS'); }
        // Confirm that the channel is owned by the user in question
        if (!Core.isOwner(metadata, unsafeKey)) {
            return void cb('INSUFFICIENT_PERMISSIONS');
        }
        return void Env.msgStore.clearChannel(channelId, function (e) {
            if (e) { return void cb(e); }
            cb();

            const channel_cache = Env.historyKeeper.channel_cache;

            const clear = function () {
                // delete the channel cache because it will have been invalidated
                delete channel_cache[channelId];
            };

            nThen(function (w) {
                Server.getChannelUserList(channelId).forEach(function (userId) {
                    Server.send(userId, [
                        0,
                        Env.historyKeeper.id,
                        'MSG',
                        userId,
                        JSON.stringify({
                            error: 'ECLEARED',
                            channel: channelId
                        })
                    ], w());
                });
            }).nThen(function () {
                clear();
            }).orTimeout(function () {
                Env.Log.warn("ON_CHANNEL_CLEARED_TIMEOUT", channelId);
                clear();
            }, 30000);
        });
    });
};

Channel.removeOwnedChannel = function (Env, safeKey, channelId, cb, Server) {
    if (typeof(channelId) !== 'string' || !Core.isValidId(channelId)) {
        return cb('INVALID_ARGUMENTS');
    }
    var unsafeKey = Util.unescapeKeyCharacters(safeKey);

    if (Env.blobStore.isFileId(channelId)) {
        var blobId = channelId;

        return void nThen(function (w) {
            // check if you have permissions
            Env.blobStore.isOwnedBy(safeKey, blobId, w(function (err, owned) {
                if (err || !owned) {
                    w.abort();
                    return void cb("INSUFFICIENT_PERMISSIONS");
                }
            }));
        }).nThen(function (w) {
            // remove the blob
            return void Env.blobStore.archive.blob(blobId, w(function (err) {
                Env.Log.info('ARCHIVAL_OWNED_FILE_BY_OWNER_RPC', {
                    safeKey: safeKey,
                    blobId: blobId,
                    status: err? String(err): 'SUCCESS',
                });
                if (err) {
                    w.abort();
                    return void cb(err);
                }
            }));
        }).nThen(function () {
            // archive the proof
            return void Env.blobStore.archive.proof(safeKey, blobId, function (err) {
                Env.Log.info("ARCHIVAL_PROOF_REMOVAL_BY_OWNER_RPC", {
                    safeKey: safeKey,
                    blobId: blobId,
                    status: err? String(err): 'SUCCESS',
                });
                if (err) {
                    return void cb("E_PROOF_REMOVAL");
                }
                cb(void 0, 'OK');
            });
        });
    }

    Metadata.getMetadata(Env, channelId, function (err, metadata) {
        if (err) { return void cb(err); }
        if (!Core.hasOwners(metadata)) { return void cb('E_NO_OWNERS'); }
        if (!Core.isOwner(metadata, unsafeKey)) {
            return void cb('INSUFFICIENT_PERMISSIONS');
        }
        // temporarily archive the file
        return void Env.msgStore.archiveChannel(channelId, function (e) {
            Env.Log.info('ARCHIVAL_CHANNEL_BY_OWNER_RPC', {
                unsafeKey: unsafeKey,
                channelId: channelId,
                status: e? String(e): 'SUCCESS',
            });
            if (e) {
                return void cb(e);
            }
            cb(void 0, 'OK');

            const channel_cache = Env.historyKeeper.channel_cache;
            const metadata_cache = Env.historyKeeper.metadata_cache;

            const clear = function () {
                delete channel_cache[channelId];
                Server.clearChannel(channelId);
                delete metadata_cache[channelId];
            };

            // an owner of a channel deleted it
            nThen(function (w) {
                // close the channel in the store
                Env.msgStore.closeChannel(channelId, w());
            }).nThen(function (w) {
                // Server.channelBroadcast would be better
                // but we can't trust it to track even one callback,
                // let alone many in parallel.
                // so we simulate it on this side to avoid race conditions
                Server.getChannelUserList(channelId).forEach(function (userId) {
                    Server.send(userId, [
                        0,
                        Env.historyKeeper.id,
                        "MSG",
                        userId,
                        JSON.stringify({
                            error: 'EDELETED',
                            channel: channelId,
                        })
                    ], w());
                });
            }).nThen(function () {
                // clear the channel's data from memory
                // once you've sent everyone a notice that the channel has been deleted
                clear();
            }).orTimeout(function () {
                Env.Log.warn('ON_CHANNEL_DELETED_TIMEOUT', channelId);
                clear();
            }, 30000);
        });
    });
};

Channel.trimHistory = function (Env, safeKey, data, cb) {
    if (!(data && typeof(data.channel) === 'string' && typeof(data.hash) === 'string' && data.hash.length === 64)) {
        return void cb('INVALID_ARGS');
    }

    var channelId = data.channel;
    var unsafeKey = Util.unescapeKeyCharacters(safeKey);
    var hash = data.hash;

    nThen(function (w) {
        Metadata.getMetadata(Env, channelId, w(function (err, metadata) {
            if (err) { return void cb(err); }
            if (!Core.hasOwners(metadata)) {
                w.abort();
                return void cb('E_NO_OWNERS');
            }
            if (!Core.isOwner(metadata, unsafeKey)) {
                w.abort();
                return void cb("INSUFFICIENT_PERMISSIONS");
            }
            // else fall through to the next block
        }));
    }).nThen(function () {
        Env.msgStore.trimChannel(channelId, hash, function (err) {
            if (err) { return void cb(err); }
            // clear historyKeeper's cache for this channel
            Env.historyKeeper.channelClose(channelId);
            cb(void 0, 'OK');
            delete Env.historyKeeper.channel_cache[channelId];
            delete Env.historyKeeper.metadata_cache[channelId];
        });
    });
};

var ARRAY_LINE = /^\[/;

/*  Files can contain metadata but not content
    call back with true if the channel log has no content other than metadata
    otherwise false
*/
Channel.isNewChannel = function (Env, channel, cb) {
    if (!Core.isValidId(channel)) { return void cb('INVALID_CHAN'); }
    if (channel.length !== 32) { return void cb('INVALID_CHAN'); }

    var done = false;
    Env.msgStore.getMessages(channel, function (msg) {
        if (done) { return; }
        try {
            if (typeof(msg) === 'string' && ARRAY_LINE.test(msg)) {
                done = true;
                return void cb(void 0, false);
            }
        } catch (e) {
            Env.WARN('invalid message read from store', e);
        }
    }, function () {
        if (done) { return; }
        // no more messages...
        cb(void 0, true);
    });
};

/*  writePrivateMessage
    allows users to anonymously send a message to the channel
    prevents their netflux-id from being stored in history
    and from being broadcast to anyone that might currently be in the channel

    Otherwise behaves the same as sending to a channel
*/
Channel.writePrivateMessage = function (Env, args, cb, Server) {
    var channelId = args[0];
    var msg = args[1];

    // don't bother handling empty messages
    if (!msg) { return void cb("INVALID_MESSAGE"); }

    // don't support anything except regular channels
    if (!Core.isValidId(channelId) || channelId.length !== 32) {
        return void cb("INVALID_CHAN");
    }

    // We expect a modern netflux-websocket-server instance
    // if this API isn't here everything will fall apart anyway
    if (!(Server && typeof(Server.send) === 'function')) {
        return void cb("NOT_IMPLEMENTED");
    }

    // historyKeeper expects something with an 'id' attribute
    // it will fail unless you provide it, but it doesn't need anything else
    var channelStruct = {
        id: channelId,
    };

    // construct a message to store and broadcast
    var fullMessage = [
        0, // idk
        null, // normally the netflux id, null isn't rejected, and it distinguishes messages written in this way
        "MSG", // indicate that this is a MSG
        channelId, // channel id
        msg // the actual message content. Generally a string
    ];

    // historyKeeper already knows how to handle metadata and message validation, so we just pass it off here
    // if the message isn't valid it won't be stored.
    Env.historyKeeper.channelMessage(Server, channelStruct, fullMessage);

    Server.getChannelUserList(channelId).forEach(function (userId) {
        Server.send(userId, fullMessage);
    });

    cb();
};
lib/commands/core.js
@@ -0,0 +1,190 @@
/*jshint esversion: 6 */
/* globals process */
const Core = module.exports;
const Util = require("../common-util");
const escapeKeyCharacters = Util.escapeKeyCharacters;

/* Use Nacl for checking signatures of messages */
const Nacl = require("tweetnacl/nacl-fast");

Core.DEFAULT_LIMIT = 50 * 1024 * 1024;
Core.SESSION_EXPIRATION_TIME = 60 * 1000;

Core.isValidId = function (chan) {
    return chan && chan.length && /^[a-zA-Z0-9=+-]*$/.test(chan) &&
        [32, 48].indexOf(chan.length) > -1;
};

var makeToken = Core.makeToken = function () {
    return Number(Math.floor(Math.random() * Number.MAX_SAFE_INTEGER))
        .toString(16);
};

Core.makeCookie = function (token) {
    var time = (+new Date());
    time -= time % 5000;

    return [
        time,
        process.pid,
        token
    ];
};

var parseCookie = function (cookie) {
    if (!(cookie && cookie.split)) { return null; }

    var parts = cookie.split('|');
    if (parts.length !== 3) { return null; }

    var c = {};
    c.time = new Date(parts[0]);
    c.pid = Number(parts[1]);
    c.seq = parts[2];
    return c;
};

Core.getSession = function (Sessions, key) {
    var safeKey = escapeKeyCharacters(key);
    if (Sessions[safeKey]) {
        Sessions[safeKey].atime = +new Date();
        return Sessions[safeKey];
    }
    var user = Sessions[safeKey] = {};
    user.atime = +new Date();
    user.tokens = [
        makeToken()
    ];
    return user;
};

Core.expireSession = function (Sessions, safeKey) {
    var session = Sessions[safeKey];
    if (!session) { return; }
    if (session.blobstage) {
        session.blobstage.close();
    }
    delete Sessions[safeKey];
};

Core.expireSessionAsync = function (Env, safeKey, cb) {
    setTimeout(function () {
        Core.expireSession(Env.Sessions, safeKey);
        cb(void 0, 'OK');
    });
};

var isTooOld = function (time, now) {
    return (now - time) > 300000;
};

Core.expireSessions = function (Sessions) {
    var now = +new Date();
    Object.keys(Sessions).forEach(function (safeKey) {
        var session = Sessions[safeKey];
        if (session && isTooOld(session.atime, now)) {
            Core.expireSession(Sessions, safeKey);
        }
    });
};

var addTokenForKey = function (Sessions, publicKey, token) {
    if (!Sessions[publicKey]) { throw new Error('undefined user'); }

    var user = Core.getSession(Sessions, publicKey);
    user.tokens.push(token);
    user.atime = +new Date();
    if (user.tokens.length > 2) { user.tokens.shift(); }
};

Core.isValidCookie = function (Sessions, publicKey, cookie) {
    var parsed = parseCookie(cookie);
    if (!parsed) { return false; }

    var now = +new Date();

    if (!parsed.time) { return false; }
    if (isTooOld(parsed.time, now)) {
        return false;
    }

    // different process. try harder
    if (process.pid !== parsed.pid) {
        return false;
    }

    var user = Core.getSession(Sessions, publicKey);
    if (!user) { return false; }

    var idx = user.tokens.indexOf(parsed.seq);
    if (idx === -1) { return false; }

    if (idx > 0) {
        // make a new token
        addTokenForKey(Sessions, publicKey, Core.makeToken());
    }

    return true;
};

Core.checkSignature = function (Env, signedMsg, signature, publicKey) {
    if (!(signedMsg && publicKey)) { return false; }

    var signedBuffer;
    var pubBuffer;
    var signatureBuffer;

    try {
        signedBuffer = Nacl.util.decodeUTF8(signedMsg);
    } catch (e) {
        Env.Log.error('INVALID_SIGNED_BUFFER', signedMsg);
        return null;
    }

    try {
        pubBuffer = Nacl.util.decodeBase64(publicKey);
    } catch (e) {
        return false;
    }

    try {
        signatureBuffer = Nacl.util.decodeBase64(signature);
    } catch (e) {
        return false;
    }

    if (pubBuffer.length !== 32) {
        Env.Log.error('PUBLIC_KEY_LENGTH', publicKey);
        return false;
    }

    if (signatureBuffer.length !== 64) {
        return false;
    }

    return Nacl.sign.detached.verify(signedBuffer, signatureBuffer, pubBuffer);
};

// E_NO_OWNERS
Core.hasOwners = function (metadata) {
    return Boolean(metadata && Array.isArray(metadata.owners));
};

Core.hasPendingOwners = function (metadata) {
    return Boolean(metadata && Array.isArray(metadata.pending_owners));
};

// INSUFFICIENT_PERMISSIONS
Core.isOwner = function (metadata, unsafeKey) {
    return metadata.owners.indexOf(unsafeKey) !== -1;
};

Core.isPendingOwner = function (metadata, unsafeKey) {
    return metadata.pending_owners.indexOf(unsafeKey) !== -1;
};

Core.haveACookie = function (Env, safeKey, cb) {
    cb();
};
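A hedged example of the cookie round trip implied by the functions above: makeCookie returns an array, while parseCookie expects the joined time|pid|token string, so somewhere between server and client the parts are joined with '|'. The join shown here is an assumption about that step, not code from this patch.

const Core = require('./lib/commands/core');

const Sessions = {};
const user = Core.getSession(Sessions, 'some-public-key'); // creates a session with one fresh token
const cookie = Core.makeCookie(user.tokens[0]).join('|');  // e.g. "1581340000000|12345|1a2b3c"

console.log(Core.isValidCookie(Sessions, 'some-public-key', cookie)); // true while the session is fresh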
lib/commands/metadata.js
@@ -0,0 +1,128 @@
/*jshint esversion: 6 */
const Data = module.exports;

const Meta = require("../metadata");
const BatchRead = require("../batch-read");
const WriteQueue = require("../write-queue");
const Core = require("./core");
const Util = require("../common-util");

const batchMetadata = BatchRead("GET_METADATA");
Data.getMetadata = function (Env, channel, cb/* , Server */) {
    if (!Core.isValidId(channel)) { return void cb('INVALID_CHAN'); }
    if (channel.length !== 32) { return cb("INVALID_CHAN_LENGTH"); }

    // FIXME get metadata from the server cache if it is available
    batchMetadata(channel, cb, function (done) {
        var ref = {};
        var lineHandler = Meta.createLineHandler(ref, Env.Log.error);

        return void Env.msgStore.readChannelMetadata(channel, lineHandler, function (err) {
            if (err) {
                // stream errors?
                return void done(err);
            }
            done(void 0, ref.meta);
        });
    });
};

/*  setMetadata
    - write a new line to the metadata log if a valid command is provided
    - data is an object: {
        channel: channelId,
        command: metadataCommand (string),
        value: value
    }
*/
var queueMetadata = WriteQueue();
Data.setMetadata = function (Env, safeKey, data, cb, Server) {
    var unsafeKey = Util.unescapeKeyCharacters(safeKey);

    var channel = data.channel;
    var command = data.command;
    if (!channel || !Core.isValidId(channel)) { return void cb('INVALID_CHAN'); }
    if (!command || typeof (command) !== 'string') { return void cb('INVALID_COMMAND'); }
    if (Meta.commands.indexOf(command) === -1) { return void cb('UNSUPPORTED_COMMAND'); }

    queueMetadata(channel, function (next) {
        Data.getMetadata(Env, channel, function (err, metadata) {
            if (err) {
                cb(err);
                return void next();
            }
            if (!Core.hasOwners(metadata)) {
                cb('E_NO_OWNERS');
                return void next();
            }

            // if you are a pending owner and not an owner
            //   you can either ADD_OWNERS, or RM_PENDING_OWNERS
            //   and you should only be able to add yourself as an owner
            //   everything else should be rejected
            // else if you are not an owner
            //   you should be rejected
            // else write the command

            // Confirm that the channel is owned by the user in question
            // or the user is accepting a pending ownership offer
            if (Core.hasPendingOwners(metadata) &&
                Core.isPendingOwner(metadata, unsafeKey) &&
                !Core.isOwner(metadata, unsafeKey)) {

                // If you are a pending owner, make sure you can only add yourself as an owner
                if ((command !== 'ADD_OWNERS' && command !== 'RM_PENDING_OWNERS')
                    || !Array.isArray(data.value)
                    || data.value.length !== 1
                    || data.value[0] !== unsafeKey) {
                    cb('INSUFFICIENT_PERMISSIONS');
                    return void next();
                }
                // FIXME wacky fallthrough is hard to read
                // we could pass this off to a writeMetadataCommand function
                // and make the flow easier to follow
            } else if (!Core.isOwner(metadata, unsafeKey)) {
                cb('INSUFFICIENT_PERMISSIONS');
                return void next();
            }

            // Add the new metadata line
            var line = [command, data.value, +new Date()];
            var changed = false;
            try {
                changed = Meta.handleCommand(metadata, line);
            } catch (e) {
                cb(e);
                return void next();
            }

            // if your command is valid but it didn't result in any change to the metadata,
            // call back now and don't write any "useless" line to the log
            if (!changed) {
                cb(void 0, metadata);
                return void next();
            }
            Env.msgStore.writeMetadata(channel, JSON.stringify(line), function (e) {
                if (e) {
                    cb(e);
                    return void next();
                }

                cb(void 0, metadata);
                next();

                const metadata_cache = Env.historyKeeper.metadata_cache;
                const channel_cache = Env.historyKeeper.channel_cache;

                metadata_cache[channel] = metadata;

                var index = Util.find(channel_cache, [channel, 'index']);
                if (index && typeof(index) === 'object') { index.metadata = metadata; }

                Server.channelBroadcast(channel, JSON.stringify(metadata), Env.historyKeeper.id);
            });
        });
    });
};
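A hedged example of the data argument described in the comment block above. ADD_OWNERS is one of the commands checked in the pending-owner branch; the channel id and key are placeholders.

var data = {
    channel: 'c6317cbd0a1b5b4a2cbf8fcb445e55ee',  // 32-character channel id (placeholder)
    command: 'ADD_OWNERS',
    value: ['unsafe-public-key-of-new-owner'],    // a pending owner may only add themselves
};
// Data.setMetadata(Env, safeKey, data, function (err, metadata) { /* ... */ }, Server);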
lib/commands/pin-rpc.js
@@ -0,0 +1,464 @@
/*jshint esversion: 6 */
const Core = require("./core");

const BatchRead = require("../batch-read");
const Pins = require("../pins");

const Pinning = module.exports;
const Nacl = require("tweetnacl/nacl-fast");
const Util = require("../common-util");
const nThen = require("nthen");
const Saferphore = require("saferphore");
const Pinned = require('../../scripts/pinned');

//const escapeKeyCharacters = Util.escapeKeyCharacters;
const unescapeKeyCharacters = Util.unescapeKeyCharacters;

var sumChannelSizes = function (sizes) {
    return Object.keys(sizes).map(function (id) { return sizes[id]; })
        .filter(function (x) {
            // only allow positive numbers
            return !(typeof(x) !== 'number' || x <= 0);
        })
        .reduce(function (a, b) { return a + b; }, 0);
};

// FIXME it's possible for this to respond before the server has had a chance
// to fetch the limits. Maybe we should respond with an error...
// or wait until we actually know the limits before responding
var getLimit = Pinning.getLimit = function (Env, publicKey, cb) {
    var unescapedKey = unescapeKeyCharacters(publicKey);
    var limit = Env.limits[unescapedKey];
    var defaultLimit = typeof(Env.defaultStorageLimit) === 'number'?
        Env.defaultStorageLimit: Core.DEFAULT_LIMIT;

    var toSend = limit && typeof(limit.limit) === "number"?
        [limit.limit, limit.plan, limit.note] : [defaultLimit, '', ''];

    cb(void 0, toSend);
};

var addPinned = function (
    Env,
    publicKey /*:string*/,
    channelList /*Array<string>*/,
    cb /*:()=>void*/)
{
    Env.evPinnedPadsReady.reg(() => {
        channelList.forEach((c) => {
            const x = Env.pinnedPads[c] = Env.pinnedPads[c] || {};
            x[publicKey] = 1;
        });
        cb();
    });
};
var removePinned = function (
    Env,
    publicKey /*:string*/,
    channelList /*Array<string>*/,
    cb /*:()=>void*/)
{
    Env.evPinnedPadsReady.reg(() => {
        channelList.forEach((c) => {
            const x = Env.pinnedPads[c];
            if (!x) { return; }
            delete x[publicKey];
        });
        cb();
    });
};

var getMultipleFileSize = function (Env, channels, cb) {
    if (!Array.isArray(channels)) { return cb('INVALID_PIN_LIST'); }
    if (typeof(Env.msgStore.getChannelSize) !== 'function') {
        return cb('GET_CHANNEL_SIZE_UNSUPPORTED');
    }

    var i = channels.length;
    var counts = {};

    var done = function () {
        i--;
        if (i === 0) { return cb(void 0, counts); }
    };

    channels.forEach(function (channel) {
        Pinning.getFileSize(Env, channel, function (e, size) {
            if (e) {
                // most likely error here is that a file no longer exists
                // but a user still has it in their drive, and wants to know
                // its size. We should find a way to inform them of this in
                // the future. For now we can just tell them it has no size.

                //WARN('getFileSize', e);
                counts[channel] = 0;
                return done();
            }
            counts[channel] = size;
            done();
        });
    });
};

const batchUserPins = BatchRead("LOAD_USER_PINS");
var loadUserPins = function (Env, publicKey, cb) {
    var session = Core.getSession(Env.Sessions, publicKey);

    if (session.channels) {
        return cb(session.channels);
    }

    batchUserPins(publicKey, cb, function (done) {
        var ref = {};
        var lineHandler = Pins.createLineHandler(ref, function (label, data) {
            Env.Log.error(label, {
                log: publicKey,
                data: data,
            });
        });

        // if channels aren't in memory. load them from disk
        Env.pinStore.getMessages(publicKey, lineHandler, function () {
            // no more messages

            // only put this into the cache if it completes
            session.channels = ref.pins;
            done(ref.pins); // FIXME no error handling?
        });
    });
};

var truthyKeys = function (O) {
    return Object.keys(O).filter(function (k) {
        return O[k];
    });
};

var getChannelList = Pinning.getChannelList = function (Env, publicKey, _cb) {
    var cb = Util.once(Util.mkAsync(_cb));
    loadUserPins(Env, publicKey, function (pins) {
        cb(truthyKeys(pins));
    });
};

const batchTotalSize = BatchRead("GET_TOTAL_SIZE");
Pinning.getTotalSize = function (Env, publicKey, cb) {
    var unescapedKey = unescapeKeyCharacters(publicKey);
    var limit = Env.limits[unescapedKey];

    // Get a common key if multiple users share the same quota, otherwise take the public key
    var batchKey = (limit && Array.isArray(limit.users)) ? limit.users.join('') : publicKey;

    batchTotalSize(batchKey, cb, function (done) {
        var channels = [];
        var bytes = 0;
        nThen(function (waitFor) {
            // Get the channels list for our user account
            Pinning.getChannelList(Env, publicKey, waitFor(function (_channels) {
                if (!_channels) {
                    waitFor.abort();
                    return done('INVALID_PIN_LIST');
                }
                Array.prototype.push.apply(channels, _channels);
            }));
            // Get the channels list for users sharing our quota
            if (limit && Array.isArray(limit.users) && limit.users.length > 1) {
                limit.users.forEach(function (key) {
                    if (key === unescapedKey) { return; } // Don't count ourselves twice
                    getChannelList(Env, key, waitFor(function (_channels) {
                        if (!_channels) { return; } // Broken user, don't count their quota
                        Array.prototype.push.apply(channels, _channels);
                    }));
                });
            }
        }).nThen(function (waitFor) {
            // Get size of the channels
            var list = []; // Contains the channels already counted in the quota to avoid duplicates
            channels.forEach(function (channel) { // TODO semaphore?
                if (list.indexOf(channel) !== -1) { return; }
                list.push(channel);
                Pinning.getFileSize(Env, channel, waitFor(function (e, size) {
                    if (!e) { bytes += size; }
                }));
            });
        }).nThen(function () {
            done(void 0, bytes);
        });
    });
};

/*  Users should be able to clear their own pin log with an authenticated RPC
*/
Pinning.removePins = function (Env, safeKey, cb) {
    if (typeof(Env.pinStore.removeChannel) !== 'function') {
        return void cb("E_NOT_IMPLEMENTED");
    }
    Env.pinStore.removeChannel(safeKey, function (err) {
        Env.Log.info('DELETION_PIN_BY_OWNER_RPC', {
            safeKey: safeKey,
            status: err? String(err): 'SUCCESS',
        });

        if (err) { return void cb(err); }
        cb(void 0, 'OK');
    });
};

Pinning.trimPins = function (Env, safeKey, cb) {
    cb("NOT_IMPLEMENTED");
};

var getFreeSpace = Pinning.getFreeSpace = function (Env, publicKey, cb) {
    getLimit(Env, publicKey, function (e, limit) {
        if (e) { return void cb(e); }
        Pinning.getTotalSize(Env, publicKey, function (e, size) {
            if (typeof(size) === 'undefined') { return void cb(e); }

            var rem = limit[0] - size;
            if (typeof(rem) !== 'number') {
                return void cb('invalid_response');
            }
            cb(void 0, rem);
        });
    });
};

var hashChannelList = function (A) {
    var uniques = [];

    A.forEach(function (a) {
        if (uniques.indexOf(a) === -1) { uniques.push(a); }
    });
    uniques.sort();

    var hash = Nacl.util.encodeBase64(Nacl.hash(Nacl
        .util.decodeUTF8(JSON.stringify(uniques))));

    return hash;
};

var getHash = Pinning.getHash = function (Env, publicKey, cb) {
    getChannelList(Env, publicKey, function (channels) {
        cb(void 0, hashChannelList(channels));
    });
};

Pinning.pinChannel = function (Env, publicKey, channels, cb) {
    if (!(channels && channels.filter)) {
        // expected array
        return void cb('INVALID_PIN_LIST');
    }

    // get channel list ensures your session has a cached channel list
    getChannelList(Env, publicKey, function (pinned) {
        var session = Core.getSession(Env.Sessions, publicKey);

        // only pin channels which are not already pinned
        var toStore = channels.filter(function (channel) {
            return pinned.indexOf(channel) === -1;
        });

        if (toStore.length === 0) {
            return void getHash(Env, publicKey, cb);
        }

        getMultipleFileSize(Env, toStore, function (e, sizes) {
            if (typeof(sizes) === 'undefined') { return void cb(e); }
            var pinSize = sumChannelSizes(sizes);

            getFreeSpace(Env, publicKey, function (e, free) {
                if (typeof(free) === 'undefined') {
                    Env.WARN('getFreeSpace', e);
                    return void cb(e);
                }
                if (pinSize > free) { return void cb('E_OVER_LIMIT'); }

                Env.pinStore.message(publicKey, JSON.stringify(['PIN', toStore, +new Date()]),
                    function (e) {
                    if (e) { return void cb(e); }
                    toStore.forEach(function (channel) {
                        session.channels[channel] = true;
                    });
                    addPinned(Env, publicKey, toStore, () => {});
                    getHash(Env, publicKey, cb);
                });
            });
        });
    });
};

Pinning.unpinChannel = function (Env, publicKey, channels, cb) {
    if (!(channels && channels.filter)) {
        // expected array
        return void cb('INVALID_PIN_LIST');
    }

    getChannelList(Env, publicKey, function (pinned) {
        var session = Core.getSession(Env.Sessions, publicKey);

        // only unpin channels which are pinned
        var toStore = channels.filter(function (channel) {
            return pinned.indexOf(channel) !== -1;
        });

        if (toStore.length === 0) {
            return void getHash(Env, publicKey, cb);
        }

        Env.pinStore.message(publicKey, JSON.stringify(['UNPIN', toStore, +new Date()]),
            function (e) {
            if (e) { return void cb(e); }
            toStore.forEach(function (channel) {
                delete session.channels[channel];
            });
            removePinned(Env, publicKey, toStore, () => {});
            getHash(Env, publicKey, cb);
        });
    });
};

Pinning.resetUserPins = function (Env, publicKey, channelList, cb) {
    if (!Array.isArray(channelList)) { return void cb('INVALID_PIN_LIST'); }
    var session = Core.getSession(Env.Sessions, publicKey);

    if (!channelList.length) {
        return void getHash(Env, publicKey, function (e, hash) {
            if (e) { return cb(e); }
            cb(void 0, hash);
        });
    }

    var pins = {};
    getMultipleFileSize(Env, channelList, function (e, sizes) {
        if (typeof(sizes) === 'undefined') { return void cb(e); }
        var pinSize = sumChannelSizes(sizes);

        getLimit(Env, publicKey, function (e, limit) {
            if (e) {
                Env.WARN('[RESET_ERR]', e);
                return void cb(e);
            }

            /*  we want to let people pin, even if they are over their limit,
                but they should only be able to do this once.

                This prevents data loss in the case that someone registers, but
                does not have enough free space to pin their migrated data.

                They will not be able to pin additional pads until they upgrade
                or delete enough files to go back under their limit. */
            if (pinSize > limit[0] && session.hasPinned) { return void(cb('E_OVER_LIMIT')); }
            Env.pinStore.message(publicKey, JSON.stringify(['RESET', channelList, +new Date()]),
                function (e) {
                if (e) { return void cb(e); }
                channelList.forEach(function (channel) {
                    pins[channel] = true;
                });

                var oldChannels;
                if (session.channels && typeof(session.channels) === 'object') {
                    oldChannels = Object.keys(session.channels);
                } else {
                    oldChannels = [];
                }
                removePinned(Env, publicKey, oldChannels, () => {
                    addPinned(Env, publicKey, channelList, ()=>{});
                });

                // update in-memory cache IFF the reset was allowed.
                session.channels = pins;
                getHash(Env, publicKey, function (e, hash) {
                    cb(e, hash);
                });
            });
        });
    });
};

Pinning.getFileSize = function (Env, channel, _cb) {
    var cb = Util.once(Util.mkAsync(_cb));
    if (!Core.isValidId(channel)) { return void cb('INVALID_CHAN'); }
    if (channel.length === 32) {
        if (typeof(Env.msgStore.getChannelSize) !== 'function') {
            return cb('GET_CHANNEL_SIZE_UNSUPPORTED');
        }

        return void Env.msgStore.getChannelSize(channel, function (e, size /*:number*/) {
            if (e) {
                if (e.code === 'ENOENT') { return void cb(void 0, 0); }
                return void cb(e.code);
            }
            cb(void 0, size);
        });
    }

    // 'channel' refers to a file, so you need another API
    Env.blobStore.size(channel, function (e, size) {
        if (typeof(size) === 'undefined') { return void cb(e); }
        cb(void 0, size);
    });
};

/*  accepts a list, and returns a sublist of channel or file ids which seem
    to have been deleted from the server (file size 0)

    we might consider that we should only say a file is gone if fs.stat returns
    ENOENT, but for now it's simplest to just rely on getFileSize...
*/
Pinning.getDeletedPads = function (Env, channels, cb) {
    if (!Array.isArray(channels)) { return cb('INVALID_LIST'); }
    var L = channels.length;

    var sem = Saferphore.create(10);
    var absentees = [];

    var job = function (channel, wait) {
        return function (give) {
            Pinning.getFileSize(Env, channel, wait(give(function (e, size) {
                if (e) { return; }
                if (size === 0) { absentees.push(channel); }
            })));
        };
    };

    nThen(function (w) {
        for (var i = 0; i < L; i++) {
            sem.take(job(channels[i], w));
        }
    }).nThen(function () {
        cb(void 0, absentees);
    });
};

// inform that the
Pinning.loadChannelPins = function (Env) {
    Pinned.load(function (err, data) {
        if (err) {
            Env.Log.error("LOAD_CHANNEL_PINS", err);

            // FIXME not sure what should be done here instead
            Env.pinnedPads = {};
            Env.evPinnedPadsReady.fire();
            return;
        }

        Env.pinnedPads = data;
        Env.evPinnedPadsReady.fire();
    }, {
        pinPath: Env.paths.pin,
    });
};

Pinning.isChannelPinned = function (Env, channel, cb) {
    Env.evPinnedPadsReady.reg(() => {
        if (Env.pinnedPads[channel] && Object.keys(Env.pinnedPads[channel]).length) { // FIXME 'Object.keys' here is overkill. We only need to know that it isn't empty
            cb(void 0, true);
        } else {
            delete Env.pinnedPads[channel];
            cb(void 0, false);
        }
    });
};
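A hedged illustration of the pin log written by the functions above: one JSON array per line, replayed in order to rebuild a user's pinned set. Channel ids and timestamps are placeholders, not values from this patch.

const exampleLog = [
    ['RESET', ['c6317cbd0a1b5b4a2cbf8fcb445e55ee'], 1581340000000],
    ['PIN',   ['d41d8cd98f00b204e9800998ecf8427e'], 1581340005000],
    ['UNPIN', ['c6317cbd0a1b5b4a2cbf8fcb445e55ee'], 1581340010000],
];
// After replaying these lines only 'd41d8cd98f00b204e9800998ecf8427e' remains pinned,
// and getHash returns the base64 SHA-512 of the sorted, deduplicated channel list.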
lib/commands/quota.js
@@ -0,0 +1,107 @@
/*jshint esversion: 6 */
/* globals Buffer*/
const Quota = module.exports;

const Util = require("../common-util");
const Package = require('../../package.json');
const Https = require("https");

Quota.applyCustomLimits = function (Env) {
    var isLimit = function (o) {
        var valid = o && typeof(o) === 'object' &&
            typeof(o.limit) === 'number' &&
            typeof(o.plan) === 'string' &&
            typeof(o.note) === 'string';
        return valid;
    };

    // read custom limits from the Environment (taken from config)
    var customLimits = (function (custom) {
        var limits = {};
        Object.keys(custom).forEach(function (k) {
            k.replace(/\/([^\/]+)$/, function (all, safeKey) {
                var id = Util.unescapeKeyCharacters(safeKey || '');
                limits[id] = custom[k];
                return '';
            });
        });
        return limits;
    }(Env.customLimits || {}));

    Object.keys(customLimits).forEach(function (k) {
        if (!isLimit(customLimits[k])) { return; }
        Env.limits[k] = customLimits[k];
    });
};

Quota.updateCachedLimits = function (Env, cb) {
    if (Env.adminEmail === false) {
        Quota.applyCustomLimits(Env);
        if (Env.allowSubscriptions === false) { return; }
        throw new Error("allowSubscriptions must be false if adminEmail is false");
    }

    var body = JSON.stringify({
        domain: Env.myDomain,
        subdomain: Env.mySubdomain || null,
        adminEmail: Env.adminEmail,
        version: Package.version
    });
    var options = {
        host: 'accounts.cryptpad.fr',
        path: '/api/getauthorized',
        method: 'POST',
        headers: {
            "Content-Type": "application/json",
            "Content-Length": Buffer.byteLength(body)
        }
    };

    var req = Https.request(options, function (response) {
        if (!('' + response.statusCode).match(/^2\d\d$/)) {
            return void cb('SERVER ERROR ' + response.statusCode);
        }
        var str = '';

        response.on('data', function (chunk) {
            str += chunk;
        });

        response.on('end', function () {
            try {
                var json = JSON.parse(str);
                Env.limits = json;
                Quota.applyCustomLimits(Env);
                cb(void 0);
            } catch (e) {
                cb(e);
            }
        });
    });

    req.on('error', function (e) {
        Quota.applyCustomLimits(Env);
        // FIXME this is always falsey. Maybe we just suppress errors?
        if (!Env.domain) { return cb(); }
        cb(e);
    });

    req.end(body);
};

// The limits object contains storage limits for all the publicKey that have paid
// To each key is associated an object containing the 'limit' value and a 'note' explaining that limit
Quota.getUpdatedLimit = function (Env, safeKey, cb) { // FIXME BATCH?S
    Quota.updateCachedLimits(Env, function (err) {
        if (err) { return void cb(err); }

        var limit = Env.limits[safeKey];

        if (limit && typeof(limit.limit) === 'number') {
            return void cb(void 0, [limit.limit, limit.plan, limit.note]);
        }

        return void cb(void 0, [Env.defaultStorageLimit, '', '']);
    });
};
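A hedged sketch of the customLimits shape consumed by applyCustomLimits above: keys whose last path segment is the user's public key, and values carrying limit, plan and note. The key format is inferred from the regex above (it takes the segment after the last slash); the full key and the values here are placeholders.

Env.customLimits = {
    '/1/cryptpad-user1/YZgXQxKR0Rcb6r6CmxHPdAGLVludrAF2lEnkbx1vVOo=': {
        limit: 20 * 1024 * 1024 * 1024,   // 20 GB, as a byte count
        plan: 'insider',                  // placeholder plan name
        note: 'storage space donated by an external sponsor',
    },
};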
lib/commands/upload.js
@@ -0,0 +1,57 @@
/*jshint esversion: 6 */
const Upload = module.exports;
const Util = require("../common-util");
const Pinning = require("./pin-rpc");
const nThen = require("nthen");
const Core = require("./core");

Upload.status = function (Env, safeKey, filesize, _cb) { // FIXME FILES
    var cb = Util.once(Util.mkAsync(_cb));

    // validate that the provided size is actually a positive number
    if (typeof(filesize) !== 'number' ||
        filesize < 0) { return void cb('E_INVALID_SIZE'); }

    if (filesize >= Env.maxUploadSize) { return cb('TOO_LARGE'); }

    nThen(function (w) {
        var abortAndCB = Util.both(w.abort, cb);
        Env.blobStore.status(safeKey, w(function (err, inProgress) {
            // if there's an error something is weird
            if (err) { return void abortAndCB(err); }

            // we cannot upload two things at once
            if (inProgress) { return void abortAndCB(void 0, true); }
        }));
    }).nThen(function () {
        // if you're here then there are no pending uploads
        // check if you have space in your quota to upload something of this size
        Pinning.getFreeSpace(Env, safeKey, function (e, free) {
            if (e) { return void cb(e); }
            if (filesize >= free) { return cb('NOT_ENOUGH_SPACE'); }

            var user = Core.getSession(Env.Sessions, safeKey);
            user.pendingUploadSize = filesize;
            user.currentUploadSize = 0;

            cb(void 0, false);
        });
    });
};

Upload.upload = function (Env, safeKey, chunk, cb) {
    Env.blobStore.upload(safeKey, chunk, cb);
};

Upload.complete = function (Env, safeKey, arg, cb) {
    Env.blobStore.complete(safeKey, arg, cb);
};

Upload.cancel = function (Env, safeKey, arg, cb) {
    Env.blobStore.cancel(safeKey, arg, cb);
};

Upload.complete_owned = function (Env, safeKey, arg, cb) {
    Env.blobStore.completeOwned(safeKey, arg, cb);
};
lib/historyKeeper.js
@@ -0,0 +1,97 @@
/* jshint esversion: 6 */

const nThen = require('nthen');
const Crypto = require('crypto');
const WriteQueue = require("./write-queue");
const BatchRead = require("./batch-read");
const RPC = require("./rpc");
const HK = require("./hk-util.js");

module.exports.create = function (config, cb) {
    const Log = config.log;

    Log.silly('HK_LOADING', 'LOADING HISTORY_KEEPER MODULE');

    // TODO populate Env with everything that you use from config
    // so that you can stop passing around your raw config
    // and more easily share state between historyKeeper and rpc
    const Env = {
        Log: Log,
        // tasks
        // store
        id: Crypto.randomBytes(8).toString('hex'),

        metadata_cache: {},
        channel_cache: {},
        queueStorage: WriteQueue(),
        batchIndexReads: BatchRead("HK_GET_INDEX"),
    };

    config.historyKeeper = {
        metadata_cache: Env.metadata_cache,
        channel_cache: Env.channel_cache,

        id: Env.id,

        channelMessage: function (Server, channel, msgStruct) {
            // netflux-server emits 'channelMessage' events whenever someone broadcasts to a channel
            // historyKeeper stores these messages if the channel id indicates that they are
            // a channel type with permanent history
            HK.onChannelMessage(Env, Server, channel, msgStruct);
        },
        channelClose: function (channelName) {
            // netflux-server emits 'channelClose' events whenever everyone leaves a channel
            // we drop cached metadata and indexes at the same time
            HK.dropChannel(Env, channelName);
        },
        channelOpen: function (Server, channelName, userId) {
            Env.channel_cache[channelName] = {};
            Server.send(userId, [
                0,
                Env.id,
                'JOIN',
                channelName
            ]);
        },
        directMessage: function (Server, seq, userId, json) {
            // netflux-server allows you to register an id with a handler
            // this handler is invoked every time someone sends a message to that id
            HK.onDirectMessage(Env, Server, seq, userId, json);
        },
    };

    Log.verbose('HK_ID', 'History keeper ID: ' + Env.id);

    nThen(function (w) {
        require('../storage/file').create(config, w(function (_store) {
            config.store = _store;
            Env.store = _store;
        }));
    }).nThen(function (w) {
        require("../storage/tasks").create(config, w(function (e, tasks) {
            if (e) {
                throw e;
            }
            Env.tasks = tasks;
            config.tasks = tasks;
            if (config.disableIntegratedTasks) { return; }

            config.intervals = config.intervals || {};
            config.intervals.taskExpiration = setInterval(function () {
                tasks.runAll(function (err) {
                    if (err) {
                        // either TASK_CONCURRENCY or an error with tasks.list
                        // in either case it is already logged.
                    }
                });
            }, 1000 * 60 * 5); // run every five minutes
        }));
    }).nThen(function () {
        RPC.create(config, function (err, _rpc) {
            if (err) { throw err; }

            Env.rpc = _rpc;
            cb(void 0, config.historyKeeper);
        });
    });
};
@ -0,0 +1,929 @@ |
|||
/* jshint esversion: 6 */ |
|||
/* global Buffer */ |
|||
var HK = module.exports; |
|||
|
|||
const nThen = require('nthen'); |
|||
const Once = require("./once"); |
|||
const Meta = require("./metadata"); |
|||
const Nacl = require('tweetnacl/nacl-fast'); |
|||
|
|||
const now = function () { return (new Date()).getTime(); }; |
|||
const ONE_DAY = 1000 * 60 * 60 * 24; // one day in milliseconds
|
|||
|
|||
/* getHash |
|||
* this function slices off the leading portion of a message which is |
|||
most likely unique |
|||
* these "hashes" are used to identify particular messages in a channel's history |
|||
* clients store "hashes" either in memory or in their drive to query for new messages: |
|||
* when reconnecting to a pad |
|||
* when connecting to chat or a mailbox |
|||
* thus, we can't change this function without invalidating client data which: |
|||
* is encrypted clientside |
|||
* can't be easily migrated |
|||
* don't break it! |
|||
*/ |
|||
const getHash = HK.getHash = function (msg, Log) { |
|||
if (typeof(msg) !== 'string') { |
|||
if (Log) { |
|||
Log.warn('HK_GET_HASH', 'getHash() called on ' + typeof(msg) + ': ' + msg); |
|||
} |
|||
return ''; |
|||
} |
|||
return msg.slice(0,64); |
|||
}; |
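// Illustrative example of the behaviour described above: getHash() simply
// slices the first 64 characters of a message string.
//
//   getHash(new Array(65).join('a') + '|rest of the signed payload', Log);
//   // => 'aaaa...a' (64 characters), later usable as a lastKnownHash
//   getHash(5, Log); // => '' and logs an HK_GET_HASH warning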
|||
|
|||
// historyKeeper should explicitly store any channel
|
|||
// with a 32 character id
|
|||
const STANDARD_CHANNEL_LENGTH = HK.STANDARD_CHANNEL_LENGTH = 32; |
|||
|
|||
// historyKeeper should not store messages sent to any channel
|
|||
// with a 34 character id
|
|||
const EPHEMERAL_CHANNEL_LENGTH = HK.EPHEMERAL_CHANNEL_LENGTH = 34; |
|||
|
|||
const tryParse = function (Env, str) { |
|||
try { |
|||
return JSON.parse(str); |
|||
} catch (err) { |
|||
Env.Log.error('HK_PARSE_ERROR', err); |
|||
} |
|||
}; |
|||
|
|||
/* sliceCpIndex |
|||
returns a list of all checkpoints which might be relevant for a client connecting to a session |
|||
|
|||
* if there are two or fewer checkpoints, return everything you have |
|||
* if there are more than two |
|||
* return at least two |
|||
* plus any more which were received within the last 100 messages |
|||
|
|||
This is important because the additional history is what prevents |
|||
clients from forking on checkpoints and dropping forked history. |
|||
|
|||
*/ |
|||
const sliceCpIndex = function (cpIndex, line) { |
|||
// Remove "old" checkpoints (cp sent before 100 messages ago)
|
|||
const minLine = Math.max(0, (line - 100)); |
|||
let start = cpIndex.slice(0, -2); |
|||
const end = cpIndex.slice(-2); |
|||
start = start.filter(function (obj) { |
|||
return obj.line > minLine; |
|||
}); |
|||
return start.concat(end); |
|||
}; |
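// Worked example: with 250 lines of history, minLine is 150, so the oldest
// checkpoint below is dropped while the final two are always kept.
//
//   sliceCpIndex([
//       { offset: 0,    line: 10  },
//       { offset: 512,  line: 160 },
//       { offset: 1024, line: 200 },
//       { offset: 2048, line: 240 }
//   ], 250);
//   // => [ { offset: 512, line: 160 }, { offset: 1024, line: 200 }, { offset: 2048, line: 240 } ]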
|||
|
|||
const isMetadataMessage = function (parsed) { |
|||
return Boolean(parsed && parsed.channel); |
|||
}; |
|||
|
|||
// validateKeyStrings supplied by clients must decode to 32-byte Uint8Arrays
|
|||
const isValidValidateKeyString = function (key) { |
|||
try { |
|||
return typeof(key) === 'string' && |
|||
Nacl.util.decodeBase64(key).length === Nacl.sign.publicKeyLength; |
|||
} catch (e) { |
|||
return false; |
|||
} |
|||
}; |
|||
|
|||
var CHECKPOINT_PATTERN = /^cp\|(([A-Za-z0-9+\/=]+)\|)?/; |
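// Example: checkpoint messages are prefixed with 'cp|<id>|', so exec()
// exposes the checkpoint id (when present) as element 2 of the match array.
//
//   CHECKPOINT_PATTERN.exec('cp|xyz123==|<signed content>')[2]; // => 'xyz123=='
//   CHECKPOINT_PATTERN.exec('not a checkpoint');                // => null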
|||
|
|||
/* expireChannel is here to clean up channels that should have been removed |
|||
but for some reason are still present |
|||
*/ |
|||
const expireChannel = function (Env, channel) { |
|||
return void Env.store.archiveChannel(channel, function (err) { |
|||
Env.Log.info("ARCHIVAL_CHANNEL_BY_HISTORY_KEEPER_EXPIRATION", { |
|||
channelId: channel, |
|||
status: err? String(err): "SUCCESS", |
|||
}); |
|||
}); |
|||
}; |
|||
|
|||
/* dropChannel |
|||
* cleans up memory structures which are managed entirely by the historyKeeper |
|||
*/ |
|||
const dropChannel = HK.dropChannel = function (Env, chanName) { |
|||
delete Env.metadata_cache[chanName]; |
|||
delete Env.channel_cache[chanName]; |
|||
}; |
|||
|
|||
/* checkExpired |
|||
 * synchronously returns true or false to indicate whether the channel is expired |
|||
* according to its metadata |
|||
* has some side effects: |
|||
* closes the channel via the store.closeChannel API |
|||
* and then broadcasts to all channel members that the channel has expired |
|||
* removes the channel from the netflux-server's in-memory cache |
|||
* removes the channel metadata from history keeper's in-memory cache |
|||
|
|||
FIXME the boolean nature of this API should be separated from its side effects |
|||
*/ |
|||
const checkExpired = function (Env, Server, channel) { |
|||
const store = Env.store; |
|||
const metadata_cache = Env.metadata_cache; |
|||
|
|||
if (!(channel && channel.length === STANDARD_CHANNEL_LENGTH)) { return false; } |
|||
let metadata = metadata_cache[channel]; |
|||
if (!(metadata && typeof(metadata.expire) === 'number')) { return false; } |
|||
|
|||
// the number of milliseconds ago the channel should have expired
|
|||
let pastDue = (+new Date()) - metadata.expire; |
|||
|
|||
// less than zero means that it hasn't expired yet
|
|||
if (pastDue < 0) { return false; } |
|||
|
|||
// if it should have expired more than a day ago...
|
|||
// there may have been a problem with scheduling tasks
|
|||
// or the scheduled tasks may not be running
|
|||
// so trigger a removal from here
|
|||
if (pastDue >= ONE_DAY) { expireChannel(Env, channel); } |
|||
|
|||
// close the channel
|
|||
store.closeChannel(channel, function () { |
|||
Server.channelBroadcast(channel, { |
|||
error: 'EEXPIRED', |
|||
channel: channel |
|||
}, Env.id); |
|||
dropChannel(Env, channel); |
|||
}); |
|||
|
|||
// return true to indicate that it has expired
|
|||
return true; |
|||
}; |
|||
|
|||
/* computeIndex |
|||
can call back with an error or a computed index which includes: |
|||
* cpIndex: |
|||
* array including any checkpoints pushed within the last 100 messages |
|||
* processed by 'sliceCpIndex(cpIndex, line)' |
|||
* offsetByHash: |
|||
* a map containing message offsets by their hash |
|||
* this is for every message in history, so it could be very large... |
|||
* except we remove offsets from the map if they occur before the oldest relevant checkpoint |
|||
* size: in bytes |
|||
* metadata: |
|||
* validationKey |
|||
* expiration time |
|||
* owners |
|||
* ??? (anything else we might add in the future) |
|||
* line |
|||
* the number of messages in history |
|||
* including the initial metadata line, if it exists |
|||
|
|||
*/ |
|||
const computeIndex = function (Env, channelName, cb) { |
|||
const store = Env.store; |
|||
const Log = Env.Log; |
|||
|
|||
const cpIndex = []; |
|||
let messageBuf = []; |
|||
let metadata; |
|||
let i = 0; |
|||
|
|||
const ref = {}; |
|||
|
|||
const CB = Once(cb); |
|||
|
|||
const offsetByHash = {}; |
|||
let size = 0; |
|||
nThen(function (w) { |
|||
// iterate over all messages in the channel log
|
|||
// old channels can contain metadata as the first message of the log
|
|||
// remember metadata the first time you encounter it
|
|||
// otherwise index important messages in the log
|
|||
store.readMessagesBin(channelName, 0, (msgObj, readMore) => { |
|||
let msg; |
|||
// keep an eye out for the metadata line if you haven't already seen it
|
|||
// but only check for metadata on the first line
|
|||
if (!i && !metadata && msgObj.buff.indexOf('{') === 0) { |
|||
i++; // always increment the message counter
|
|||
msg = tryParse(Env, msgObj.buff.toString('utf8')); |
|||
if (typeof msg === "undefined") { return readMore(); } |
|||
|
|||
// validate that the current line really is metadata before storing it as such
|
|||
if (isMetadataMessage(msg)) { |
|||
metadata = msg; |
|||
return readMore(); |
|||
} |
|||
} |
|||
i++; |
|||
if (msgObj.buff.indexOf('cp|') > -1) { |
|||
msg = msg || tryParse(Env, msgObj.buff.toString('utf8')); |
|||
if (typeof msg === "undefined") { return readMore(); } |
|||
// cache the offsets of checkpoints if they can be parsed
|
|||
if (msg[2] === 'MSG' && msg[4].indexOf('cp|') === 0) { |
|||
cpIndex.push({ |
|||
offset: msgObj.offset, |
|||
line: i |
|||
}); |
|||
// we only want to store messages since the latest checkpoint
|
|||
// so clear the buffer every time you see a new one
|
|||
messageBuf = []; |
|||
} |
|||
} |
|||
// if it's not metadata or a checkpoint then it should be a regular message
|
|||
// store it in the buffer
|
|||
messageBuf.push(msgObj); |
|||
return readMore(); |
|||
}, w((err) => { |
|||
if (err && err.code !== 'ENOENT') { |
|||
w.abort(); |
|||
return void CB(err); |
|||
} |
|||
|
|||
// once indexing is complete you should have a buffer of messages since the latest checkpoint
|
|||
// map the 'hash' of each message to its byte offset in the log, to be used for reconnecting clients
|
|||
messageBuf.forEach((msgObj) => { |
|||
const msg = tryParse(Env, msgObj.buff.toString('utf8')); |
|||
if (typeof msg === "undefined") { return; } |
|||
if (msg[0] === 0 && msg[2] === 'MSG' && typeof(msg[4]) === 'string') { |
|||
// msgObj.offset is API guaranteed by our storage module
|
|||
// it should always be a valid positive integer
|
|||
offsetByHash[getHash(msg[4], Log)] = msgObj.offset; |
|||
} |
|||
// There is a trailing \n at the end of the file
|
|||
size = msgObj.offset + msgObj.buff.length + 1; |
|||
}); |
|||
})); |
|||
}).nThen(function (w) { |
|||
// create a function which will iterate over amendments to the metadata
|
|||
const handler = Meta.createLineHandler(ref, Log.error); |
|||
|
|||
// initialize the accumulator in case there was a foundational metadata line in the log content
|
|||
if (metadata) { handler(void 0, metadata); } |
|||
|
|||
// iterate over the dedicated metadata log (if it exists)
|
|||
// proceed even in the event of a stream error on the metadata log
|
|||
store.readDedicatedMetadata(channelName, handler, w(function (err) { |
|||
if (err) { |
|||
return void Log.error("DEDICATED_METADATA_ERROR", err); |
|||
} |
|||
})); |
|||
}).nThen(function () { |
|||
// when all is done, cache the metadata in memory
|
|||
if (ref.index) { // but don't bother if no metadata was found...
|
|||
metadata = Env.metadata_cache[channelName] = ref.meta; |
|||
} |
|||
// and return the computed index
|
|||
CB(null, { |
|||
// Only keep the checkpoints included in the last 100 messages
|
|||
cpIndex: sliceCpIndex(cpIndex, i), |
|||
offsetByHash: offsetByHash, |
|||
size: size, |
|||
metadata: metadata, |
|||
line: i |
|||
}); |
|||
}); |
|||
}; |
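// A rough sketch (values invented for illustration) of the index this
// function passes to its callback for a channel with two recent checkpoints:
//
//   {
//       cpIndex: [ { offset: 1024, line: 12 }, { offset: 4096, line: 98 } ],
//       offsetByHash: { '<first 64 chars of a message>': 4096 /*, ... */ },
//       size: 8192,      // total size of the channel log in bytes
//       metadata: { channel: '<32 character id>', validateKey: '<base64 key>' },
//       line: 120        // number of lines, including the metadata line if present
//   }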
|||
|
|||
/* getIndex |
|||
calls back with an error if anything goes wrong |
|||
or with a cached index for a channel if it exists |
|||
(along with metadata) |
|||
otherwise it calls back with the index computed by 'computeIndex' |
|||
|
|||
as an added bonus: |
|||
if the channel exists but its index does not then it caches the index |
|||
*/ |
|||
const getIndex = (Env, channelName, cb) => { |
|||
const channel_cache = Env.channel_cache; |
|||
|
|||
const chan = channel_cache[channelName]; |
|||
|
|||
// if there is a channel in memory and it has an index cached, return it
|
|||
if (chan && chan.index) { |
|||
// enforce async behaviour
|
|||
return void setTimeout(function () { |
|||
cb(undefined, chan.index); |
|||
}); |
|||
} |
|||
|
|||
Env.batchIndexReads(channelName, cb, function (done) { |
|||
computeIndex(Env, channelName, (err, ret) => { |
|||
// this is most likely an unrecoverable filesystem error
|
|||
if (err) { return void done(err); } |
|||
// cache the computed result if possible
|
|||
if (chan) { chan.index = ret; } |
|||
// return
|
|||
done(void 0, ret); |
|||
}); |
|||
}); |
|||
}; |
|||
|
|||
/* storeMessage |
|||
* channel id |
|||
* the message to store |
|||
* whether the message is a checkpoint |
|||
* optionally the hash of the message |
|||
* it's not always used, but we guard against it |
|||
|
|||
|
|||
* async but doesn't have a callback |
|||
* source of a race condition whereby: |
|||
 * two messages can be inserted |
|||
* two offsets can be computed using the total size of all the messages |
|||
* but the offsets don't correspond to the actual location of the newlines |
|||
* because the two actions were performed like ABba... |
|||
* the fix is to use callbacks and implement queueing for writes |
|||
* to guarantee that offset computation is always atomic with writes |
|||
*/ |
|||
const storeMessage = function (Env, channel, msg, isCp, optionalMessageHash) { |
|||
const id = channel.id; |
|||
const Log = Env.Log; |
|||
|
|||
Env.queueStorage(id, function (next) { |
|||
const msgBin = Buffer.from(msg + '\n', 'utf8'); |
|||
// Store the message first, and update the index only once it's stored.
|
|||
// store.messageBin can be async so updating the index first may
|
|||
// result in a wrong cpIndex
|
|||
nThen((waitFor) => { |
|||
Env.store.messageBin(id, msgBin, waitFor(function (err) { |
|||
if (err) { |
|||
waitFor.abort(); |
|||
Log.error("HK_STORE_MESSAGE_ERROR", err.message); |
|||
|
|||
// this error is critical, but there's not much we can do at the moment
|
|||
// proceed with more messages, but they'll probably fail too
|
|||
// at least you won't have a memory leak
|
|||
|
|||
// TODO make it possible to respond to clients with errors so they know
|
|||
// their message wasn't stored
|
|||
return void next(); |
|||
} |
|||
})); |
|||
}).nThen((waitFor) => { |
|||
getIndex(Env, id, waitFor((err, index) => { |
|||
if (err) { |
|||
Log.warn("HK_STORE_MESSAGE_INDEX", err.stack); |
|||
// non-critical, we'll be able to get the channel index later
|
|||
return void next(); |
|||
} |
|||
if (typeof (index.line) === "number") { index.line++; } |
|||
if (isCp) { |
|||
index.cpIndex = sliceCpIndex(index.cpIndex, index.line || 0); |
|||
for (let k in index.offsetByHash) { |
|||
if (index.offsetByHash[k] < index.cpIndex[0].offset) { |
|||
delete index.offsetByHash[k]; |
|||
} |
|||
} |
|||
index.cpIndex.push({ |
|||
offset: index.size, |
|||
line: ((index.line || 0) + 1) |
|||
}); |
|||
} |
|||
if (optionalMessageHash) { index.offsetByHash[optionalMessageHash] = index.size; } |
|||
index.size += msgBin.length; |
|||
|
|||
// handle the next element in the queue
|
|||
next(); |
|||
})); |
|||
}); |
|||
}); |
|||
}; |
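// A minimal sketch of why the queue above prevents the race described in the
// comment block: tasks queued under the same id run strictly one after
// another, so the index is always updated before the next write begins.
// (Assumes the same queue(id, function (next) { ... }) API used by Env.queueStorage.)
//
//   var queue = require("./write-queue")();
//   queue('channelId', function (next) {
//       // write message A, update index.size, then release the queue
//       next();
//   });
//   queue('channelId', function (next) {
//       // message B only runs after A's index update, so its offset is correct
//       next();
//   });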
|||
|
|||
|
|||
/* getHistoryOffset |
|||
returns a number representing the byte offset from the start of the log |
|||
for whatever history you're seeking. |
|||
|
|||
query by providing a 'lastKnownHash', |
|||
which is really just a string of the first 64 characters of an encrypted message. |
|||
OR by -1 which indicates that we want the full history (byte offset 0) |
|||
OR nothing, which indicates that you want whatever messages the historyKeeper deems relevant |
|||
(typically the last few checkpoints) |
|||
|
|||
this function embeds a lot of the history keeper's logic: |
|||
|
|||
0. if you passed -1 as the lastKnownHash it means you want the complete history |
|||
* I'm not sure why you'd need to call this function if you know it will return 0 in this case... |
|||
* it has a side-effect of filling the index cache if it's empty |
|||
1. if you provided a lastKnownHash and that message does not exist in the history: |
|||
* either the client has made a mistake or the history they knew about no longer exists |
|||
* call back with EINVAL |
|||
2. if you did not provide a lastKnownHash |
|||
* and there are fewer than two checkpoints: |
|||
* return 0 (read from the start of the file) |
|||
* and there are two or more checkpoints: |
|||
* return the offset of the earliest checkpoint which 'sliceCpIndex' considers relevant |
|||
3. if you did provide a lastKnownHash |
|||
* read through the log until you find the hash that you're looking for |
|||
* call back with either the byte offset of the message that you found OR |
|||
* -1 if you didn't find it |
|||
|
|||
*/ |
|||
const getHistoryOffset = (Env, channelName, lastKnownHash, cb) => { |
|||
const store = Env.store; |
|||
const Log = Env.Log; |
|||
|
|||
// lastKnownhash === -1 means we want the complete history
|
|||
if (lastKnownHash === -1) { return void cb(null, 0); } |
|||
let offset = -1; |
|||
nThen((waitFor) => { |
|||
getIndex(Env, channelName, waitFor((err, index) => { |
|||
if (err) { waitFor.abort(); return void cb(err); } |
|||
|
|||
// check if the "hash" the client is requesting exists in the index
|
|||
const lkh = index.offsetByHash[lastKnownHash]; |
|||
// we evict old hashes from the index as new checkpoints are discovered.
|
|||
// if someone connects and asks for a hash that is no longer relevant,
|
|||
// we tell them it's an invalid request. This is because of the semantics of "GET_HISTORY"
|
|||
// which is only ever used when connecting or reconnecting in typical uses of history...
|
|||
// this assumption should hold for uses by chainpad, but perhaps not for other use cases.
|
|||
// EXCEPT: other cases don't use checkpoints!
|
|||
// clients that are told that their request is invalid should just make another request
|
|||
// without specifying the hash, and just trust the server to give them the relevant data.
|
|||
// QUESTION: does this mean mailboxes are causing the server to store too much stuff in memory?
|
|||
if (lastKnownHash && typeof(lkh) !== "number") { |
|||
waitFor.abort(); |
|||
return void cb(new Error('EINVAL')); |
|||
} |
|||
|
|||
// Since last 2 checkpoints
|
|||
if (!lastKnownHash) { |
|||
waitFor.abort(); |
|||
// Less than 2 checkpoints in the history: return everything
|
|||
if (index.cpIndex.length < 2) { return void cb(null, 0); } |
|||
// Otherwise return the second last checkpoint's index
|
|||
return void cb(null, index.cpIndex[0].offset); |
|||
/* LATER... |
|||
in practice, two checkpoints can be very close together |
|||
we have measures to avoid duplicate checkpoints, but editors |
|||
can produce nearby checkpoints which are slightly different, |
|||
and slip past these protections. To be really careful, we can |
|||
seek past nearby checkpoints by some number of patches so as |
|||
to ensure that all editors have sufficient knowledge of history |
|||
to reconcile their differences. */ |
|||
} |
|||
|
|||
offset = lkh; |
|||
})); |
|||
}).nThen((waitFor) => { |
|||
// if offset is less than zero then presumably the channel has no messages
|
|||
// returning falls through to the next block and therefore returns -1
|
|||
if (offset !== -1) { return; } |
|||
|
|||
// do a lookup from the index
|
|||
// FIXME maybe we don't need this anymore?
|
|||
// otherwise we have a non-negative offset and we can start to read from there
|
|||
store.readMessagesBin(channelName, 0, (msgObj, readMore, abort) => { |
|||
// tryParse returns a parsed message or undefined
|
|||
const msg = tryParse(Env, msgObj.buff.toString('utf8')); |
|||
// if it was undefined then go onto the next message
|
|||
if (typeof msg === "undefined") { return readMore(); } |
|||
if (typeof(msg[4]) !== 'string' || lastKnownHash !== getHash(msg[4], Log)) { |
|||
return void readMore(); |
|||
} |
|||
offset = msgObj.offset; |
|||
abort(); |
|||
}, waitFor(function (err) { |
|||
if (err) { waitFor.abort(); return void cb(err); } |
|||
})); |
|||
}).nThen(() => { |
|||
cb(null, offset); |
|||
}); |
|||
}; |
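// Worked examples of the cases described above, assuming a channel whose
// index holds two checkpoints at byte offsets 512 and 2048 (variable names
// are placeholders):
//
//   getHistoryOffset(Env, chan, -1, cb);          // cb(null, 0): full history
//   getHistoryOffset(Env, chan, undefined, cb);   // cb(null, 512): from the second-to-last checkpoint
//   getHistoryOffset(Env, chan, knownHash, cb);   // cb(null, <offset of that message>)
//   getHistoryOffset(Env, chan, staleHash, cb);   // cb(Error('EINVAL')): hash no longer indexed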
|||
|
|||
/* getHistoryAsync |
|||
* finds the appropriate byte offset from which to begin reading using 'getHistoryOffset' |
|||
* streams through the rest of the messages, safely parsing them and returning the parsed content to the handler |
|||
* calls back when it has reached the end of the log |
|||
|
|||
Used by: |
|||
* GET_HISTORY |
|||
|
|||
*/ |
|||
const getHistoryAsync = (Env, channelName, lastKnownHash, beforeHash, handler, cb) => { |
|||
const store = Env.store; |
|||
|
|||
let offset = -1; |
|||
nThen((waitFor) => { |
|||
getHistoryOffset(Env, channelName, lastKnownHash, waitFor((err, os) => { |
|||
if (err) { |
|||
waitFor.abort(); |
|||
return void cb(err); |
|||
} |
|||
offset = os; |
|||
})); |
|||
}).nThen((waitFor) => { |
|||
if (offset === -1) { return void cb(new Error("could not find offset")); } |
|||
const start = (beforeHash) ? 0 : offset; |
|||
store.readMessagesBin(channelName, start, (msgObj, readMore, abort) => { |
|||
if (beforeHash && msgObj.offset >= offset) { return void abort(); } |
|||
var parsed = tryParse(Env, msgObj.buff.toString('utf8')); |
|||
if (!parsed) { return void readMore(); } |
|||
handler(parsed, readMore); |
|||
}, waitFor(function (err) { |
|||
return void cb(err); |
|||
})); |
|||
}); |
|||
}; |
|||
|
|||
/* getOlderHistory |
|||
* allows clients to query for all messages until a known hash is read |
|||
* stores all messages in history as they are read |
|||
* can therefore be very expensive for memory |
|||
* should probably be converted to a streaming interface |
|||
|
|||
Used by: |
|||
* GET_HISTORY_RANGE |
|||
*/ |
|||
const getOlderHistory = function (Env, channelName, oldestKnownHash, cb) { |
|||
const store = Env.store; |
|||
const Log = Env.Log; |
|||
var messageBuffer = []; |
|||
var found = false; |
|||
store.getMessages(channelName, function (msgStr) { |
|||
if (found) { return; } |
|||
|
|||
let parsed = tryParse(Env, msgStr); |
|||
if (typeof parsed === "undefined") { return; } |
|||
|
|||
// identify classic metadata messages by their inclusion of a channel.
|
|||
// and don't send metadata, since:
|
|||
// 1. the user won't be interested in it
|
|||
// 2. this metadata is potentially incomplete/incorrect
|
|||
if (isMetadataMessage(parsed)) { return; } |
|||
|
|||
var content = parsed[4]; |
|||
if (typeof(content) !== 'string') { return; } |
|||
|
|||
var hash = getHash(content, Log); |
|||
if (hash === oldestKnownHash) { |
|||
found = true; |
|||
} |
|||
messageBuffer.push(parsed); |
|||
}, function (err) { |
|||
if (err) { |
|||
Log.error("HK_GET_OLDER_HISTORY", err); |
|||
} |
|||
cb(messageBuffer); |
|||
}); |
|||
}; |
|||
|
|||
const handleRPC = function (Env, Server, seq, userId, parsed) { |
|||
const HISTORY_KEEPER_ID = Env.id; |
|||
|
|||
/* RPC Calls... */ |
|||
var rpc_call = parsed.slice(1); |
|||
|
|||
Server.send(userId, [seq, 'ACK']); |
|||
try { |
|||
// slice off the sequence number and pass in the rest of the message
|
|||
Env.rpc(Server, rpc_call, function (err, output) { |
|||
if (err) { |
|||
Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify([parsed[0], 'ERROR', err])]); |
|||
return; |
|||
} |
|||
Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify([parsed[0]].concat(output))]); |
|||
}); |
|||
} catch (e) { |
|||
// if anything throws in the middle, send an error
|
|||
Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify([parsed[0], 'ERROR', 'SERVER_ERROR'])]); |
|||
} |
|||
}; |
|||
|
|||
const handleGetHistory = function (Env, Server, seq, userId, parsed) { |
|||
const store = Env.store; |
|||
const tasks = Env.tasks; |
|||
const metadata_cache = Env.metadata_cache; |
|||
const channel_cache = Env.channel_cache; |
|||
const HISTORY_KEEPER_ID = Env.id; |
|||
const Log = Env.Log; |
|||
|
|||
// parsed[1] is the channel id
|
|||
// parsed[2] is a validation key or an object containing metadata (optional)
|
|||
// parsed[3] is the last known hash (optional)
|
|||
|
|||
Server.send(userId, [seq, 'ACK']); |
|||
var channelName = parsed[1]; |
|||
var config = parsed[2]; |
|||
var metadata = {}; |
|||
var lastKnownHash; |
|||
|
|||
// clients can optionally pass a map of attributes
|
|||
// if the channel already exists this map will be ignored
|
|||
// otherwise it will be stored as the initial metadata state for the channel
|
|||
if (config && typeof config === "object" && !Array.isArray(parsed[2])) { |
|||
lastKnownHash = config.lastKnownHash; |
|||
metadata = config.metadata || {}; |
|||
if (metadata.expire) { |
|||
metadata.expire = +metadata.expire * 1000 + (+new Date()); |
|||
} |
|||
} |
|||
metadata.channel = channelName; |
|||
metadata.created = +new Date(); |
|||
|
|||
// if the user sends us an invalid key, we won't be able to validate their messages
|
|||
// so they'll never get written to the log anyway. Let's just drop their message
|
|||
// on the floor instead of doing a bunch of extra work
|
|||
// TODO send them an error message so they know something is wrong
|
|||
if (metadata.validateKey && !isValidValidateKeyString(metadata.validateKey)) { |
|||
return void Log.error('HK_INVALID_KEY', metadata.validateKey); |
|||
} |
|||
|
|||
nThen(function (waitFor) { |
|||
var w = waitFor(); |
|||
|
|||
/* unless this is a young channel, we will serve all messages from an offset |
|||
this will not include the channel metadata, so we need to explicitly fetch that. |
|||
unfortunately, we can't just serve it blindly, since then young channels will |
|||
send the metadata twice, so let's do a quick check of what we're going to serve... |
|||
*/ |
|||
getIndex(Env, channelName, waitFor((err, index) => { |
|||
/* if there's an error here, it should be encountered |
|||
and handled by the next nThen block. |
|||
so, let's just fall through... |
|||
*/ |
|||
if (err) { return w(); } |
|||
|
|||
|
|||
// it's possible that the channel doesn't have metadata
|
|||
// but in that case there's no point in checking if the channel expired
|
|||
// or in trying to send metadata, so just skip this block
|
|||
if (!index || !index.metadata) { return void w(); } |
|||
// And then check if the channel is expired. If it is, send the error and abort
|
|||
// FIXME this is hard to read because 'checkExpired' has side effects
|
|||
if (checkExpired(Env, Server, channelName)) { return void waitFor.abort(); } |
|||
// always send metadata with GET_HISTORY requests
|
|||
Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify(index.metadata)], w); |
|||
})); |
|||
}).nThen(() => { |
|||
let msgCount = 0; |
|||
|
|||
// TODO compute lastKnownHash in a manner such that it will always skip past the metadata line?
|
|||
getHistoryAsync(Env, channelName, lastKnownHash, false, (msg, readMore) => { |
|||
msgCount++; |
|||
// avoid sending the metadata message a second time
|
|||
if (isMetadataMessage(msg) && metadata_cache[channelName]) { return readMore(); } |
|||
Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify(msg)], readMore); |
|||
}, (err) => { |
|||
if (err && err.code !== 'ENOENT') { |
|||
if (err.message !== 'EINVAL') { Log.error("HK_GET_HISTORY", err); } |
|||
const parsedMsg = {error:err.message, channel: channelName}; |
|||
Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify(parsedMsg)]); |
|||
return; |
|||
} |
|||
|
|||
const chan = channel_cache[channelName]; |
|||
|
|||
if (msgCount === 0 && !metadata_cache[channelName] && Server.channelContainsUser(channelName, userId)) { |
|||
metadata_cache[channelName] = metadata; |
|||
|
|||
// the index will have already been constructed and cached at this point
|
|||
// but it will not have detected any metadata because it hasn't been written yet
|
|||
// this means that the cache starts off as invalid, so we have to correct it
|
|||
if (chan && chan.index) { chan.index.metadata = metadata; } |
|||
|
|||
// new channels will always have their metadata written to a dedicated metadata log
|
|||
// but any lines after the first which are not amendments in a particular format will be ignored.
|
|||
// Thus we should be safe from race conditions here if we just write metadata to the log as below...
|
|||
// TODO validate this logic
|
|||
// otherwise maybe we need to check that the metadata log is empty as well
|
|||
store.writeMetadata(channelName, JSON.stringify(metadata), function (err) { |
|||
if (err) { |
|||
// FIXME tell the user that there was a channel error?
|
|||
return void Log.error('HK_WRITE_METADATA', { |
|||
channel: channelName, |
|||
error: err, |
|||
}); |
|||
} |
|||
}); |
|||
|
|||
// write tasks
|
|||
if(metadata.expire && typeof(metadata.expire) === 'number') { |
|||
// the fun part...
|
|||
// the user has said they want this pad to expire at some point
|
|||
tasks.write(metadata.expire, "EXPIRE", [ channelName ], function (err) { |
|||
if (err) { |
|||
// if there is an error, we don't want to crash the whole server...
|
|||
// just log it, and if there's a problem you'll be able to fix it
|
|||
// at a later date with the provided information
|
|||
Log.error('HK_CREATE_EXPIRE_TASK', err); |
|||
Log.info('HK_INVALID_EXPIRE_TASK', JSON.stringify([metadata.expire, 'EXPIRE', channelName])); |
|||
} |
|||
}); |
|||
} |
|||
Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify(metadata)]); |
|||
} |
|||
|
|||
// End of history message:
|
|||
let parsedMsg = {state: 1, channel: channelName}; |
|||
|
|||
Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify(parsedMsg)]); |
|||
}); |
|||
}); |
|||
}; |
|||
|
|||
const handleGetHistoryRange = function (Env, Server, seq, userId, parsed) { |
|||
var channelName = parsed[1]; |
|||
var map = parsed[2]; |
|||
const HISTORY_KEEPER_ID = Env.id; |
|||
|
|||
if (!(map && typeof(map) === 'object')) { |
|||
return void Server.send(userId, [seq, 'ERROR', 'INVALID_ARGS', HISTORY_KEEPER_ID]); |
|||
} |
|||
|
|||
var oldestKnownHash = map.from; |
|||
var desiredMessages = map.count; |
|||
var desiredCheckpoint = map.cpCount; |
|||
var txid = map.txid; |
|||
if (typeof(desiredMessages) !== 'number' && typeof(desiredCheckpoint) !== 'number') { |
|||
return void Server.send(userId, [seq, 'ERROR', 'UNSPECIFIED_COUNT', HISTORY_KEEPER_ID]); |
|||
} |
|||
|
|||
if (!txid) { |
|||
return void Server.send(userId, [seq, 'ERROR', 'NO_TXID', HISTORY_KEEPER_ID]); |
|||
} |
|||
|
|||
Server.send(userId, [seq, 'ACK']); |
|||
return void getOlderHistory(Env, channelName, oldestKnownHash, function (messages) { |
|||
var toSend = []; |
|||
if (typeof (desiredMessages) === "number") { |
|||
toSend = messages.slice(-desiredMessages); |
|||
} else { |
|||
let cpCount = 0; |
|||
for (var i = messages.length - 1; i >= 0; i--) { |
|||
if (/^cp\|/.test(messages[i][4]) && i !== (messages.length - 1)) { |
|||
cpCount++; |
|||
} |
|||
toSend.unshift(messages[i]); |
|||
if (cpCount >= desiredCheckpoint) { break; } |
|||
} |
|||
} |
|||
toSend.forEach(function (msg) { |
|||
Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, |
|||
JSON.stringify(['HISTORY_RANGE', txid, msg])]); |
|||
}); |
|||
|
|||
Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, |
|||
JSON.stringify(['HISTORY_RANGE_END', txid, channelName]) |
|||
]); |
|||
}); |
|||
}; |
|||
|
|||
const handleGetFullHistory = function (Env, Server, seq, userId, parsed) { |
|||
const HISTORY_KEEPER_ID = Env.id; |
|||
const Log = Env.Log; |
|||
|
|||
// parsed[1] is the channel id
|
|||
// parsed[2] is a validation key (optional)
|
|||
// parsed[3] is the last known hash (optional)
|
|||
|
|||
Server.send(userId, [seq, 'ACK']); |
|||
|
|||
// FIXME should we send metadata here too?
|
|||
// none of the clientside code which uses this API needs metadata, but it won't hurt to send it (2019-08-22)
|
|||
return void getHistoryAsync(Env, parsed[1], -1, false, (msg, readMore) => { |
|||
Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify(['FULL_HISTORY', msg])], readMore); |
|||
}, (err) => { |
|||
let parsedMsg = ['FULL_HISTORY_END', parsed[1]]; |
|||
if (err) { |
|||
Log.error('HK_GET_FULL_HISTORY', err.stack); |
|||
parsedMsg = ['ERROR', parsed[1], err.message]; |
|||
} |
|||
Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify(parsedMsg)]); |
|||
}); |
|||
}; |
|||
|
|||
const directMessageCommands = { |
|||
GET_HISTORY: handleGetHistory, |
|||
GET_HISTORY_RANGE: handleGetHistoryRange, |
|||
GET_FULL_HISTORY: handleGetFullHistory, |
|||
}; |
|||
|
|||
/* onDirectMessage |
|||
* exported for use by the netflux-server |
|||
* parses and handles all direct messages directed to the history keeper |
|||
* check if it's expired and execute all the associated side-effects |
|||
* routes queries to the appropriate handlers |
|||
*/ |
|||
HK.onDirectMessage = function (Env, Server, seq, userId, json) { |
|||
const Log = Env.Log; |
|||
Log.silly('HK_MESSAGE', json); |
|||
|
|||
let parsed; |
|||
try { |
|||
parsed = JSON.parse(json[2]); |
|||
} catch (err) { |
|||
Log.error("HK_PARSE_CLIENT_MESSAGE", json); |
|||
return; |
|||
} |
|||
|
|||
// If the requested history is for an expired channel, abort
|
|||
// Note that if we don't have the keys for that channel in metadata_cache, we'll
|
|||
// have to abort later (once we know the expiration time)
|
|||
if (checkExpired(Env, Server, parsed[1])) { return; } |
|||
|
|||
// look up the appropriate command in the map of commands or fall back to RPC
|
|||
var command = directMessageCommands[parsed[0]] || handleRPC; |
|||
|
|||
// run the command with the standard function signature
|
|||
command(Env, Server, seq, userId, parsed); |
|||
}; |
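// Illustrative shapes of the direct messages this dispatcher handles, where
// 'parsed' is JSON.parse(json[2]) (channel ids and hashes are placeholders):
//
//   ['GET_HISTORY',       channelId, { metadata: {...}, lastKnownHash: hash }]
//   ['GET_HISTORY_RANGE', channelId, { from: hash, count: 10, txid: 'abc123' }]
//   ['GET_FULL_HISTORY',  channelId]
//   // anything else falls through to handleRPC and is passed to Env.rpc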
|||
|
|||
/* onChannelMessage |
|||
    Determine what we should store when a message is broadcast to a channel |
|||
|
|||
* ignores ephemeral channels |
|||
* ignores messages sent to expired channels |
|||
* rejects duplicated checkpoints |
|||
* validates messages to channels that have validation keys |
|||
* caches the id of the last saved checkpoint |
|||
* adds timestamps to incoming messages |
|||
* writes messages to the store |
|||
*/ |
|||
HK.onChannelMessage = function (Env, Server, channel, msgStruct) { |
|||
const Log = Env.Log; |
|||
|
|||
// TODO our usage of 'channel' here looks prone to errors
|
|||
// we only use it for its 'id', but it can contain other stuff
|
|||
// also, we're using this RPC from both the RPC and Netflux-server
|
|||
// we should probably just change this to expect a channel id directly
|
|||
|
|||
// don't store messages if the channel id indicates that it's an ephemeral message
|
|||
if (!channel.id || channel.id.length === EPHEMERAL_CHANNEL_LENGTH) { return; } |
|||
|
|||
const isCp = /^cp\|/.test(msgStruct[4]); |
|||
let id; |
|||
if (isCp) { |
|||
// id becomes either null or an array of results...
|
|||
id = CHECKPOINT_PATTERN.exec(msgStruct[4]); |
|||
if (Array.isArray(id) && id[2] && id[2] === channel.lastSavedCp) { |
|||
// Reject duplicate checkpoints
|
|||
return; |
|||
} |
|||
} |
|||
|
|||
let metadata; |
|||
nThen(function (w) { |
|||
// getIndex (and therefore the latest metadata)
|
|||
getIndex(Env, channel.id, w(function (err, index) { |
|||
if (err) { |
|||
w.abort(); |
|||
return void Log.error('CHANNEL_MESSAGE_ERROR', err); |
|||
} |
|||
|
|||
if (!index.metadata) { |
|||
// if there's no channel metadata then it can't be an expiring channel
|
|||
// nor can we possibly validate it
|
|||
return; |
|||
} |
|||
|
|||
metadata = index.metadata; |
|||
|
|||
// don't write messages to expired channels
|
|||
if (checkExpired(Env, Server, channel)) { return void w.abort(); } |
|||
|
|||
// if there's no validateKey present skip to the next block
|
|||
if (!metadata.validateKey) { return; } |
|||
|
|||
// trim the checkpoint indicator off the message if it's present
|
|||
let signedMsg = (isCp) ? msgStruct[4].replace(CHECKPOINT_PATTERN, '') : msgStruct[4]; |
|||
// convert the message from a base64 string into a Uint8Array
|
|||
|
|||
// FIXME this can fail and the client won't notice
|
|||
signedMsg = Nacl.util.decodeBase64(signedMsg); |
|||
|
|||
// FIXME this can blow up
|
|||
// TODO check that that won't cause any problems other than not being able to append...
|
|||
const validateKey = Nacl.util.decodeBase64(metadata.validateKey); |
|||
// validate the message
|
|||
const validated = Nacl.sign.open(signedMsg, validateKey); |
|||
if (!validated) { |
|||
// don't go any further if the message fails validation
|
|||
w.abort(); |
|||
Log.info("HK_SIGNED_MESSAGE_REJECTED", 'Channel '+channel.id); |
|||
return; |
|||
} |
|||
})); |
|||
}).nThen(function () { |
|||
// do checkpoint stuff...
|
|||
|
|||
// 1. get the checkpoint id
|
|||
// 2. reject duplicate checkpoints
|
|||
|
|||
if (isCp) { |
|||
// if the message is a checkpoint we will have already validated
|
|||
// that it isn't a duplicate. remember its id so that we can
|
|||
// repeat this process for the next incoming checkpoint
|
|||
|
|||
// WARNING: the fact that we only check the most recent checkpoints
|
|||
// is a potential source of bugs if one editor has high latency and
|
|||
// pushes a duplicate of an earlier checkpoint than the latest which
|
|||
// has been pushed by editors with low latency
|
|||
// FIXME
|
|||
if (Array.isArray(id) && id[2]) { |
|||
// Store new checkpoint hash
|
|||
channel.lastSavedCp = id[2]; |
|||
} |
|||
} |
|||
|
|||
// add the time to the message
|
|||
msgStruct.push(now()); |
|||
|
|||
// storeMessage
|
|||
storeMessage(Env, channel, JSON.stringify(msgStruct), isCp, getHash(msgStruct[4], Log)); |
|||
}); |
|||
}; |
|||
|
|||
|
|||
@ -0,0 +1,306 @@ |
|||
/*jshint esversion: 6 */ |
|||
const nThen = require("nthen"); |
|||
|
|||
const Util = require("./common-util"); |
|||
const mkEvent = Util.mkEvent; |
|||
|
|||
const Core = require("./commands/core"); |
|||
const Admin = require("./commands/admin-rpc"); |
|||
const Pinning = require("./commands/pin-rpc"); |
|||
const Quota = require("./commands/quota"); |
|||
const Block = require("./commands/block"); |
|||
const Metadata = require("./commands/metadata"); |
|||
const Channel = require("./commands/channel"); |
|||
const Upload = require("./commands/upload"); |
|||
|
|||
var RPC = module.exports; |
|||
|
|||
const Store = require("../storage/file"); |
|||
const BlobStore = require("../storage/blob"); |
|||
|
|||
const UNAUTHENTICATED_CALLS = { |
|||
GET_FILE_SIZE: Pinning.getFileSize, |
|||
GET_MULTIPLE_FILE_SIZE: Pinning.getMultipleFileSize, |
|||
GET_DELETED_PADS: Pinning.getDeletedPads, |
|||
IS_CHANNEL_PINNED: Pinning.isChannelPinned, |
|||
IS_NEW_CHANNEL: Channel.isNewChannel, |
|||
WRITE_PRIVATE_MESSAGE: Channel.writePrivateMessage, |
|||
GET_METADATA: Metadata.getMetadata, |
|||
}; |
|||
|
|||
var isUnauthenticateMessage = function (msg) { |
|||
return msg && msg.length === 2 && typeof(UNAUTHENTICATED_CALLS[msg[0]]) === 'function'; |
|||
}; |
|||
|
|||
var handleUnauthenticatedMessage = function (Env, msg, respond, Server) { |
|||
Env.Log.silly('LOG_RPC', msg[0]); |
|||
|
|||
var method = UNAUTHENTICATED_CALLS[msg[0]]; |
|||
method(Env, msg[1], function (err, value) { |
|||
if (err) { |
|||
Env.WARN(err, msg[1]); |
|||
return void respond(err); |
|||
} |
|||
respond(err, [null, value, null]); |
|||
}, Server); |
|||
}; |
|||
|
|||
const AUTHENTICATED_USER_TARGETED = { |
|||
RESET: Pinning.resetUserPins, |
|||
PIN: Pinning.pinChannel, |
|||
UNPIN: Pinning.unpinChannel, |
|||
CLEAR_OWNED_CHANNEL: Channel.clearOwnedChannel, |
|||
REMOVE_OWNED_CHANNEL: Channel.removeOwnedChannel, |
|||
TRIM_HISTORY: Channel.trimHistory, |
|||
UPLOAD_STATUS: Upload.status, |
|||
UPLOAD: Upload.upload, |
|||
UPLOAD_COMPLETE: Upload.complete, |
|||
UPLOAD_CANCEL: Upload.cancel, |
|||
OWNED_UPLOAD_COMPLETE: Upload.complete_owned, |
|||
WRITE_LOGIN_BLOCK: Block.writeLoginBlock, |
|||
REMOVE_LOGIN_BLOCK: Block.removeLoginBlock, |
|||
ADMIN: Admin.command, |
|||
SET_METADATA: Metadata.setMetadata, |
|||
}; |
|||
|
|||
const AUTHENTICATED_USER_SCOPED = { |
|||
GET_HASH: Pinning.getHash, |
|||
GET_TOTAL_SIZE: Pinning.getTotalSize, |
|||
UPDATE_LIMITS: Quota.getUpdatedLimit, |
|||
GET_LIMIT: Pinning.getLimit, |
|||
EXPIRE_SESSION: Core.expireSessionAsync, |
|||
REMOVE_PINS: Pinning.removePins, |
|||
TRIM_PINS: Pinning.trimPins, |
|||
COOKIE: Core.haveACookie, |
|||
}; |
|||
|
|||
var isAuthenticatedCall = function (call) { |
|||
if (call === 'UPLOAD') { return false; } |
|||
return typeof(AUTHENTICATED_USER_TARGETED[call] || AUTHENTICATED_USER_SCOPED[call]) === 'function'; |
|||
}; |
|||
|
|||
var handleAuthenticatedMessage = function (Env, unsafeKey, msg, respond, Server) { |
|||
/* If you have gotten this far, you have signed the message with the |
|||
public key which you provided. |
|||
*/ |
|||
|
|||
var safeKey = Util.escapeKeyCharacters(unsafeKey); |
|||
|
|||
var Respond = function (e, value) { |
|||
var session = Env.Sessions[safeKey]; |
|||
var token = session? session.tokens.slice(-1)[0]: ''; |
|||
var cookie = Core.makeCookie(token).join('|'); |
|||
respond(e ? String(e): e, [cookie].concat(typeof(value) !== 'undefined' ?value: [])); |
|||
}; |
|||
|
|||
msg.shift(); |
|||
// discard validated cookie from message
|
|||
if (!msg.length) { |
|||
return void Respond('INVALID_MSG'); |
|||
} |
|||
|
|||
var TYPE = msg[0]; |
|||
|
|||
Env.Log.silly('LOG_RPC', TYPE); |
|||
|
|||
if (typeof(AUTHENTICATED_USER_TARGETED[TYPE]) === 'function') { |
|||
return void AUTHENTICATED_USER_TARGETED[TYPE](Env, safeKey, msg[1], function (e, value) { |
|||
Env.WARN(e, value); |
|||
return void Respond(e, value); |
|||
}, Server); |
|||
} |
|||
|
|||
if (typeof(AUTHENTICATED_USER_SCOPED[TYPE]) === 'function') { |
|||
return void AUTHENTICATED_USER_SCOPED[TYPE](Env, safeKey, function (e, value) { |
|||
if (e) { |
|||
Env.WARN(e, safeKey); |
|||
return void Respond(e); |
|||
} |
|||
Respond(e, value); |
|||
}); |
|||
} |
|||
|
|||
return void Respond('UNSUPPORTED_RPC_CALL', msg); |
|||
}; |
|||
|
|||
var rpc = function (Env, Server, data, respond) { |
|||
if (!Array.isArray(data)) { |
|||
Env.Log.debug('INVALID_ARG_FORMAT', data); |
|||
return void respond('INVALID_ARG_FORMAT'); |
|||
} |
|||
|
|||
if (!data.length) { |
|||
return void respond("INSUFFICIENT_ARGS"); |
|||
} else if (data.length !== 1) { |
|||
Env.Log.debug('UNEXPECTED_ARGUMENTS_LENGTH', data); |
|||
} |
|||
|
|||
var msg = data[0].slice(0); |
|||
|
|||
if (!Array.isArray(msg)) { |
|||
return void respond('INVALID_ARG_FORMAT'); |
|||
} |
|||
|
|||
if (isUnauthenticateMessage(msg)) { |
|||
return handleUnauthenticatedMessage(Env, msg, respond, Server); |
|||
} |
|||
|
|||
var signature = msg.shift(); |
|||
var publicKey = msg.shift(); |
|||
|
|||
// make sure a user object is initialized in the cookie jar
|
|||
if (publicKey) { |
|||
Core.getSession(Env.Sessions, publicKey); |
|||
} else { |
|||
Env.Log.debug("NO_PUBLIC_KEY_PROVIDED", publicKey); |
|||
} |
|||
|
|||
var cookie = msg[0]; |
|||
if (!Core.isValidCookie(Env.Sessions, publicKey, cookie)) { |
|||
// no cookie is fine if the RPC is to get a cookie
|
|||
if (msg[1] !== 'COOKIE') { |
|||
return void respond('NO_COOKIE'); |
|||
} |
|||
} |
|||
|
|||
var serialized = JSON.stringify(msg); |
|||
|
|||
if (!(serialized && typeof(publicKey) === 'string')) { |
|||
return void respond('INVALID_MESSAGE_OR_PUBLIC_KEY'); |
|||
} |
|||
|
|||
var command = msg[1]; |
|||
|
|||
if (command === 'UPLOAD') { |
|||
// UPLOAD is a special case that skips signature validation
|
|||
// intentional fallthrough behaviour
|
|||
return void handleAuthenticatedMessage(Env, publicKey, msg, respond, Server); |
|||
} |
|||
if (isAuthenticatedCall(command)) { |
|||
// check the signature on the message
|
|||
// refuse the command if it doesn't validate
|
|||
if (Core.checkSignature(Env, serialized, signature, publicKey) === true) { |
|||
return void handleAuthenticatedMessage(Env, publicKey, msg, respond, Server); |
|||
} |
|||
return void respond("INVALID_SIGNATURE_OR_PUBLIC_KEY"); |
|||
} |
|||
Env.Log.warn('INVALID_RPC_CALL', command); |
|||
return void respond("INVALID_RPC_CALL"); |
|||
}; |
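// Illustrative shapes of the two kinds of 'data[0]' accepted above
// (keys, cookies and arguments are placeholders):
//
//   // unauthenticated: exactly two elements, a known command and its argument
//   ['GET_FILE_SIZE', channelId]
//
//   // authenticated: signature and public key first, then cookie, command, argument
//   [signature, publicKey, cookie, 'PIN', [channelId]]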
|||
|
|||
RPC.create = function (config, cb) { |
|||
var Log = config.log; |
|||
|
|||
// load pin-store...
|
|||
Log.silly('LOADING RPC MODULE'); |
|||
|
|||
var keyOrDefaultString = function (key, def) { |
|||
return typeof(config[key]) === 'string'? config[key]: def; |
|||
}; |
|||
|
|||
var WARN = function (e, output) { |
|||
if (e && output) { |
|||
Log.warn(e, { |
|||
output: output, |
|||
message: String(e), |
|||
stack: new Error(e).stack, |
|||
}); |
|||
} |
|||
}; |
|||
|
|||
if (typeof(config.domain) !== 'undefined') { |
|||
throw new Error('fuck'); |
|||
} |
|||
|
|||
var Env = { |
|||
historyKeeper: config.historyKeeper, |
|||
intervals: config.intervals || {}, |
|||
maxUploadSize: config.maxUploadSize || (20 * 1024 * 1024), |
|||
Sessions: {}, |
|||
paths: {}, |
|||
msgStore: config.store, |
|||
pinStore: undefined, |
|||
pinnedPads: {}, |
|||
evPinnedPadsReady: mkEvent(true), |
|||
limits: {}, |
|||
admins: [], |
|||
Log: Log, |
|||
WARN: WARN, |
|||
flushCache: config.flushCache, |
|||
adminEmail: config.adminEmail, |
|||
allowSubscriptions: config.allowSubscriptions, |
|||
myDomain: config.myDomain, |
|||
mySubdomain: config.mySubdomain, |
|||
customLimits: config.customLimits, |
|||
// FIXME this attribute isn't in the default conf
|
|||
// but it is referenced in Quota
|
|||
domain: config.domain |
|||
}; |
|||
|
|||
Env.defaultStorageLimit = typeof(config.defaultStorageLimit) === 'number' && config.defaultStorageLimit > 0? |
|||
config.defaultStorageLimit: |
|||
Core.DEFAULT_LIMIT; |
|||
|
|||
try { |
|||
Env.admins = (config.adminKeys || []).map(function (k) { |
|||
k = k.replace(/\/+$/, ''); |
|||
var s = k.split('/'); |
|||
return s[s.length-1]; |
|||
}); |
|||
} catch (e) { |
|||
console.error("Can't parse admin keys. Please update or fix your config.js file!"); |
|||
} |
|||
|
|||
var Sessions = Env.Sessions; |
|||
var paths = Env.paths; |
|||
var pinPath = paths.pin = keyOrDefaultString('pinPath', './pins'); |
|||
paths.block = keyOrDefaultString('blockPath', './block'); |
|||
paths.data = keyOrDefaultString('filePath', './datastore'); |
|||
paths.staging = keyOrDefaultString('blobStagingPath', './blobstage'); |
|||
paths.blob = keyOrDefaultString('blobPath', './blob'); |
|||
|
|||
var updateLimitDaily = function () { |
|||
Quota.updateCachedLimits(Env, function (e) { |
|||
if (e) { |
|||
WARN('limitUpdate', e); |
|||
} |
|||
}); |
|||
}; |
|||
Quota.applyCustomLimits(Env); |
|||
updateLimitDaily(); |
|||
Env.intervals.dailyLimitUpdate = setInterval(updateLimitDaily, 24*3600*1000); |
|||
|
|||
Pinning.loadChannelPins(Env); |
|||
|
|||
nThen(function (w) { |
|||
Store.create({ |
|||
filePath: pinPath, |
|||
}, w(function (s) { |
|||
Env.pinStore = s; |
|||
})); |
|||
BlobStore.create({ |
|||
blobPath: config.blobPath, |
|||
blobStagingPath: config.blobStagingPath, |
|||
archivePath: config.archivePath, |
|||
getSession: function (safeKey) { |
|||
return Core.getSession(Sessions, safeKey); |
|||
}, |
|||
}, w(function (err, blob) { |
|||
if (err) { throw new Error(err); } |
|||
Env.blobStore = blob; |
|||
})); |
|||
}).nThen(function () { |
|||
cb(void 0, function (Server, data, respond) { |
|||
try { |
|||
return rpc(Env, Server, data, respond); |
|||
} catch (e) { |
|||
console.log("Error from RPC with data " + JSON.stringify(data)); |
|||
console.log(e.stack); |
|||
} |
|||
}); |
|||
// expire old sessions once per minute
|
|||
Env.intervals.sessionExpirationInterval = setInterval(function () { |
|||
Core.expireSessions(Sessions); |
|||
}, Core.SESSION_EXPIRATION_TIME); |
|||
}); |
|||
}; |
|||
@ -0,0 +1,172 @@ |
|||
var WriteQueue = require("./write-queue"); |
|||
var Util = require("./common-util"); |
|||
|
|||
/* This module implements a FIFO scheduler |
|||
which assumes the existence of three types of async tasks: |
|||
|
|||
1. ordered tasks which must be executed sequentially |
|||
2. unordered tasks which can be executed in parallel |
|||
3. blocking tasks which must block the execution of all other tasks |
|||
|
|||
The scheduler assumes there will be many resources identified by strings, |
|||
and that the constraints described above will only apply in the context |
|||
of identical string ids. |
|||
|
|||
Many blocking tasks may be executed in parallel so long as they |
|||
concern resources identified by different ids. |
|||
|
|||
USAGE: |
|||
|
|||
const schedule = require("./schedule")(); |
|||
|
|||
// schedule two sequential tasks using the resource 'pewpew'
|
|||
schedule.ordered('pewpew', function (next) { |
|||
appendToFile('beep\n', next); |
|||
}); |
|||
schedule.ordered('pewpew', function (next) { |
|||
appendToFile('boop\n', next); |
|||
}); |
|||
|
|||
// schedule a task that can happen whenever
|
|||
schedule.unordered('pewpew', function (next) { |
|||
displayFileSize(next); |
|||
}); |
|||
|
|||
// schedule a blocking task which will wait
|
|||
// until all unordered tasks have completed before commencing
|
|||
schedule.blocking('pewpew', function (next) { |
|||
deleteFile(next); |
|||
}); |
|||
|
|||
// this will be queued for after the blocking task
|
|||
schedule.ordered('pewpew', function (next) { |
|||
appendFile('boom', next); |
|||
}); |
|||
|
|||
*/ |
|||
|
|||
// return a uid which is not already in a map
|
|||
var unusedUid = function (set) { |
|||
var uid = Util.uid(); |
|||
if (set[uid]) { return unusedUid(set); } |
|||
return uid; |
|||
}; |
|||
|
|||
// return an existing session, creating one if it does not already exist
|
|||
var lookup = function (map, id) { |
|||
return (map[id] = map[id] || { |
|||
//blocking: [],
|
|||
active: {}, |
|||
blocked: {}, |
|||
}); |
|||
}; |
|||
|
|||
var isEmpty = function (map) { |
|||
for (var key in map) { |
|||
if (map.hasOwnProperty(key)) { return false; } |
|||
} |
|||
return true; |
|||
}; |
|||
|
|||
module.exports = function () { |
|||
// every scheduler instance has its own queue
|
|||
var queue = WriteQueue(); |
|||
|
|||
// ordered tasks don't require any extra logic
|
|||
var Ordered = function (id, task) { |
|||
queue(id, task); |
|||
}; |
|||
|
|||
// unordered and blocking tasks need a little extra state
|
|||
var map = {}; |
|||
|
|||
// regular garbage collection keeps memory consumption low
|
|||
var collectGarbage = function (id) { |
|||
// avoid using 'lookup' since it creates a session implicitly
|
|||
var local = map[id]; |
|||
// bail out if no session
|
|||
if (!local) { return; } |
|||
// bail out if there are blocking or active tasks
|
|||
if (local.lock) { return; } |
|||
if (!isEmpty(local.active)) { return; } |
|||
// if there are no pending actions then delete the session
|
|||
delete map[id]; |
|||
}; |
|||
|
|||
// unordered tasks run immediately if there are no blocking tasks scheduled
|
|||
// or immediately after blocking tasks finish
|
|||
var runImmediately = function (local, task) { |
|||
// set a flag in the map of active unordered tasks
|
|||
// to prevent blocking tasks from running until you finish
|
|||
var uid = unusedUid(local.active); |
|||
local.active[uid] = true; |
|||
|
|||
task(function () { |
|||
// remove the flag you set to indicate that your task completed
|
|||
delete local.active[uid]; |
|||
// don't do anything if other unordered tasks are still running
|
|||
if (!isEmpty(local.active)) { return; } |
|||
// bail out if there are no blocking tasks scheduled or ready
|
|||
if (typeof(local.waiting) !== 'function') { |
|||
return void collectGarbage(); |
|||
} |
|||
setTimeout(local.waiting); |
|||
}); |
|||
}; |
|||
|
|||
var runOnceUnblocked = function (local, task) { |
|||
var uid = unusedUid(local.blocked); |
|||
local.blocked[uid] = function () { |
|||
runImmediately(local, task); |
|||
}; |
|||
}; |
|||
|
|||
// 'unordered' tasks are scheduled to run after the most recently received blocking task
|
|||
// or immediately and in parallel if there are no blocking tasks scheduled.
|
|||
var Unordered = function (id, task) { |
|||
var local = lookup(map, id); |
|||
if (local.lock) { return runOnceUnblocked(local, task); } |
|||
runImmediately(local, task); |
|||
}; |
|||
|
|||
var runBlocked = function (local) { |
|||
for (var task in local.blocked) { |
|||
runImmediately(local, local.blocked[task]); |
|||
} |
|||
}; |
|||
|
|||
// 'blocking' tasks must be run alone.
|
|||
// They are queued alongside ordered tasks,
|
|||
// and wait until any running 'unordered' tasks complete before commencing.
|
|||
var Blocking = function (id, task) { |
|||
var local = lookup(map, id); |
|||
|
|||
queue(id, function (next) { |
|||
// start right away if there are no running unordered tasks
|
|||
if (isEmpty(local.active)) { |
|||
local.lock = true; |
|||
return void task(function () { |
|||
delete local.lock; |
|||
runBlocked(local); |
|||
next(); |
|||
}); |
|||
} |
|||
// otherwise wait until the running tasks have completed
|
|||
local.waiting = function () { |
|||
local.lock = true; |
|||
task(function () { |
|||
delete local.lock; |
|||
delete local.waiting; |
|||
runBlocked(local); |
|||
next(); |
|||
}); |
|||
}; |
|||
}); |
|||
}; |
|||
|
|||
return { |
|||
ordered: Ordered, |
|||
unordered: Unordered, |
|||
blocking: Blocking, |
|||
}; |
|||
}; |
|||
1766
rpc.js
File diff suppressed because it is too large
View File
@ -0,0 +1,220 @@ |
|||
/* three types of actions: |
|||
* read |
|||
* write |
|||
* append |
|||
  each of which takes a random amount of time |
|||
|
|||
*/ |
|||
var Util = require("../../lib/common-util"); |
|||
var schedule = require("../../lib/schedule")(); |
|||
var nThen = require("nthen"); |
|||
|
|||
var rand = function (n) { |
|||
return Math.floor(Math.random() * n); |
|||
}; |
|||
|
|||
var rand_time = function () { |
|||
// between 25 and 324
|
|||
return rand(300) + 25; |
|||
}; |
|||
|
|||
var makeAction = function (type) { |
|||
var i = 0; |
|||
return function (time) { |
|||
var j = i++; |
|||
return function (next) { |
|||
console.log(" Beginning action: %s#%s", type, j); |
|||
setTimeout(function () { |
|||
console.log(" Completed action: %s#%s", type, j); |
|||
next(); |
|||
}, time); |
|||
return j; |
|||
}; |
|||
}; |
|||
}; |
|||
|
|||
var TYPES = ['WRITE', 'READ', 'APPEND']; |
|||
var chooseAction = function () { |
|||
var n = rand(100); |
|||
|
|||
if (n < 50) { return 'APPEND'; } |
|||
if (n < 90) { return 'READ'; } |
|||
return 'WRITE'; |
|||
|
|||
//return TYPES[rand(3)];
|
|||
}; |
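// (So actions are chosen roughly 50% APPEND, 40% READ, 10% WRITE.)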
|||
|
|||
var test = function (script, cb) { |
|||
var uid = Util.uid(); |
|||
|
|||
var TO_RUN = script.length; |
|||
var total_run = 0; |
|||
|
|||
var parallel = 0; |
|||
var last_run_ordered = -1; |
|||
//var i = 0;
|
|||
|
|||
var ACTIONS = {}; |
|||
TYPES.forEach(function (type) { |
|||
ACTIONS[type] = makeAction(type); |
|||
}); |
|||
|
|||
nThen(function (w) { |
|||
setTimeout(w(), 3000); |
|||
// run scripted actions with assertions
|
|||
script.forEach(function (scene) { |
|||
var type = scene[0]; |
|||
var time = typeof(scene[1]) === 'number'? scene[1]: rand_time(); |
|||
|
|||
var action = ACTIONS[type](time); |
|||
console.log("Queuing action of type: %s(%s)", type, time); |
|||
|
|||
var proceed = w(); |
|||
|
|||
switch (type) { |
|||
case 'APPEND': |
|||
return schedule.ordered(uid, w(function (next) { |
|||
parallel++; |
|||
var temp = action(function () { |
|||
parallel--; |
|||
total_run++; |
|||
proceed(); |
|||
next(); |
|||
}); |
|||
if (temp !== (last_run_ordered + 1)) { |
|||
throw new Error("out of order"); |
|||
} |
|||
last_run_ordered = temp; |
|||
})); |
|||
case 'WRITE': |
|||
return schedule.blocking(uid, w(function (next) { |
|||
parallel++; |
|||
action(function () { |
|||
parallel--; |
|||
total_run++; |
|||
proceed(); |
|||
next(); |
|||
}); |
|||
if (parallel > 1) { |
|||
console.log("parallelism === %s", parallel); |
|||
throw new Error("too much parallel"); |
|||
} |
|||
})); |
|||
case 'READ': |
|||
return schedule.unordered(uid, w(function (next) { |
|||
parallel++; |
|||
action(function () { |
|||
parallel--; |
|||
total_run++; |
|||
proceed(); |
|||
next(); |
|||
}); |
|||
})); |
|||
default: |
|||
throw new Error("wut"); |
|||
} |
|||
}); |
|||
}).nThen(function () { |
|||
// make assertions about the whole script
|
|||
if (total_run !== TO_RUN) { |
|||
console.log("Ran %s / %s", total_run, TO_RUN); |
|||
throw new Error("skipped tasks"); |
|||
} |
|||
console.log("total_run === %s", total_run); |
|||
|
|||
cb(); |
|||
}); |
|||
}; |
|||
|
|||
|
|||
var randomScript = function () { |
|||
var len = rand(15) + 10; |
|||
var script = []; |
|||
while (len--) { |
|||
script.push([ |
|||
chooseAction(), |
|||
rand_time(), |
|||
]); |
|||
} |
|||
return script; |
|||
}; |
|||
|
|||
var WRITE = function (t) { |
|||
return ['WRITE', t]; |
|||
}; |
|||
var READ = function (t) { |
|||
return ['READ', t]; |
|||
}; |
|||
|
|||
var APPEND = function (t) { |
|||
return ['APPEND', t]; |
|||
}; |
|||
|
|||
nThen(function (w) { |
|||
test([ |
|||
['READ', 150], |
|||
['APPEND', 200], |
|||
['APPEND', 100], |
|||
['READ', 350], |
|||
['WRITE', 400], |
|||
['APPEND', 275], |
|||
['APPEND', 187], |
|||
['WRITE', 330], |
|||
['WRITE', 264], |
|||
['WRITE', 256], |
|||
], w(function () { |
|||
console.log("finished pre-scripted test\n"); |
|||
})); |
|||
}).nThen(function (w) { |
|||
test([ |
|||
WRITE(289), |
|||
APPEND(281), |
|||
READ(207), |
|||
WRITE(225), |
|||
READ(279), |
|||
WRITE(300), |
|||
READ(331), |
|||
APPEND(341), |
|||
APPEND(385), |
|||
READ(313), |
|||
WRITE(285), |
|||
READ(304), |
|||
APPEND(273), |
|||
APPEND(150), |
|||
WRITE(246), |
|||
READ(244), |
|||
WRITE(172), |
|||
APPEND(253), |
|||
READ(215), |
|||
READ(296), |
|||
APPEND(281), |
|||
APPEND(296), |
|||
WRITE(168), |
|||
], w(function () { |
|||
console.log("finished 2nd pre-scripted test\n"); |
|||
})); |
|||
}).nThen(function () { |
|||
var totalTests = 50; |
|||
var randomTests = 1; |
|||
|
|||
var last = nThen(function () { |
|||
console.log("beginning randomized tests"); |
|||
}); |
|||
|
|||
var queueRandomTest = function (i) { |
|||
last = last.nThen(function (w) { |
|||
console.log("running random test script #%s\n", i); |
|||
test(randomScript(), w(function () { |
|||
console.log("finished random test #%s\n", i); |
|||
})); |
|||
}); |
|||
}; |
|||
|
|||
while (randomTests <=totalTests) { queueRandomTest(randomTests++); } |
|||
|
|||
last.nThen(function () { |
|||
console.log("finished %s random tests", totalTests); |
|||
}); |
|||
}); |
|||
|
|||
|
|||