1089 Commits

Author SHA1 Message Date
Julien Cabillot 20a564975f Add Jenkinsfile 1 year ago
yflory e58dfbabd4 Merge branch 'master' of github.com:xwiki-labs/cryptpad 9 months ago
yflory 13e1d1267d Add chat ID in the console 9 months ago
ansuz c860819eb1 guard against coercing `undefined` to a string 9 months ago
yflory cf8841cdfa Remove cba on non-owned pads 9 months ago
yflory e5f1fa7ef0 Fix color by author automatically enabled for non-owned pads 9 months ago
ansuz b0b4029556 modify changelog with updated instructions for author colors 9 months ago
ansuz e05891184e fix regression in maxWorkers 9 months ago
yflory 2695b51b0d Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 9 months ago
yflory 440980c4af Display preview with a single click 9 months ago
ansuz 7bdd367d88 remove log statements 9 months ago
yflory 89de5869c5 Fix cursor and scroll position in codemirror after undo 9 months ago
yflory aeb81fdfad lint compliance 9 months ago
yflory 872e636ac5 Fix access modal in drive 9 months ago
yflory 97f9244efd Merge branch 'soon' into staging 9 months ago
yflory c2e0ed0d59 Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 9 months ago
yflory 1d0feeb323 Fix cba reset on join. Change how the cba state is stored 9 months ago
Weblate ac2cb29cf7 Translated using Weblate (French) 9 months ago
Weblate 65c8c043d5 Translated using Weblate (German) 9 months ago
Weblate 0544c31bea Translated using Weblate (English) 9 months ago
ansuz 7e334d0e3a add cursors to media-tags and mermaid in markdown preview 9 months ago
ansuz a8c8b85f0c add an XXX note for the 3.16.0 release 9 months ago
ansuz a04461f7c2 Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 9 months ago
ansuz 9efbe59361 force a page reload for patch version changes 9 months ago
Weblate b8d13328cb Translated using Weblate (German) 9 months ago
yflory d8855ed5f7 Remove XXX 9 months ago
ansuz f4f4e0011f don't merge this, it's very wrong 9 months ago
yflory 593011a657 Merge branch 'cba' into staging 9 months ago
yflory 8896b2eae2 Fix mediatag preview 9 months ago
ansuz df6e12877b update changelog for 3.16.0 9 months ago
yflory 22a2b85fae Fix broken share modal 9 months ago
yflory f2641bc0ee Fix initial marker 9 months ago
yflory 6d84fc5b8a Add initial marker to fix errors while 'joining' markers 9 months ago
Weblate 723549c0ba Translated using Weblate (English) 9 months ago
Weblate 9656abcade Translated using Weblate (French) 9 months ago
yflory c6cb9876a7 Fix secure iframe conflict with cba button 9 months ago
yflory 49eacf752b Fix markers errors 9 months ago
yflory 0ca779dbd1 Fix 'enable cba' button in properties 9 months ago
yflory b208ca367d Merge branch 'staging' into cba 9 months ago
yflory 9f5f4a4d52 Fix possible duplicate author id 9 months ago
ansuz cb04bec348 Merge branch 'soon' into staging 9 months ago
yflory 41c0704ad8 Page mode by default in rich text 9 months ago
yflory ca9f874afb Redraw mermaid on preview 9 months ago
Weblate 14b4411110 Translated using Weblate (German) 9 months ago
ansuz f03f70345a remove an XXX note 9 months ago
ansuz b56c73be6e delegate more work from getOlderHistory to the worker 9 months ago
ansuz fd169ff39c close streams after 2 minutes instead of 5 9 months ago
ansuz f42c8e35b1 update version to 3.16.0 (Quagga) 9 months ago
yflory 1e9b9913c5 Better debug function and fix marker error 9 months ago
ansuz d6d4c4c70b Merge branch 'soon' into staging 9 months ago
ansuz 18c5d48474 Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 9 months ago
ansuz bac8ef3c6e lint compliance 9 months ago
yflory 19e565a7dc Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 9 months ago
yflory adae4b690b outer.css cache busting 9 months ago
ansuz ef1d5bd706 Merge branch 'ckeditor-default-size' into staging 9 months ago
ansuz d0c3f0fa84 remove XXX notes 9 months ago
Weblate 8009c01f70 Translated using Weblate (French) 9 months ago
Weblate fab1fcdb33 Translated using Weblate (Dutch) 9 months ago
Weblate 5807f3680b Translated using Weblate (German) 9 months ago
Weblate 8b0006fba7 Translated using Weblate (French) 9 months ago
Weblate 78afbaa6a8 Translated using Weblate (English) 9 months ago
yflory 9ba2d11cd5 Guard against a type error 9 months ago
yflory d27dc768f3 Fix missing button to enable cba 9 months ago
yflory 283f739be5 lint compliance 9 months ago
yflory dd814713dd Merge branch 'staging' into cba 9 months ago
yflory f03713e60a Fix small issues, clean debugging logs and add known issues 9 months ago
yflory 92738828ff Merge branch 'debugtime' into cba 9 months ago
yflory 1d3f0ded81 Fix more markers errors 9 months ago
ansuz 4e57e390da write tasks inside of workers 9 months ago
ansuz c39adb9bff configure ckeditor to allow unsetting font size 9 months ago
ansuz ba6faca02e make the number of workers configurable 9 months ago
ansuz 0465f31a45 add a FIXME note 9 months ago
ansuz 04ab7f538a recover from worker faults and unify worker types 9 months ago
ansuz e0a6852b79 correctly serialize an error log 9 months ago
yflory 38acd01b35 Fix issues with falsy values 9 months ago
yflory b74a4b6bb4 Fix more cba issues and add debugging data 9 months ago
yflory 026bf6a425 Fix obvious issues in cba 9 months ago
yflory 9c5f0c0d6f Use rgba instead of #RRGGBBAA for the author markers 9 months ago
ansuz 9ed82640bf Merge branch 'worker-queue' into staging 9 months ago
ansuz e8b1fcf710 solve some cases where crashing workers could result in an invalid state for the queue 9 months ago
ansuz 9ed26cfeb0 lint compliance 9 months ago
yflory cdcb01623d Merge branch 'q' into staging 9 months ago
yflory 7890342ae1 Add cba hint 9 months ago
yflory fd2ee0fced Add comment 9 months ago
yflory 1dee2fd13c Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 9 months ago
yflory f83179cb62 Fix mediatag cache preventing preview modal 9 months ago
ansuz dc0460780f Merge branch 'soon' into staging 9 months ago
ansuz c46561e98d Merge branch 'master' into soon 9 months ago
ansuz 22186ccb62 Merge branch 'mermaid-styles2' into staging 9 months ago
yflory 94c9e47d32 Remember the state of cba per user in their owned pads 9 months ago
yflory 87fc1f8daf Show/hide and enable/disable cba 9 months ago
yflory 88760fde6a Clean the code 9 months ago
ansuz 74567e0cf4 return process memory usage in GET CACHE STATS RPC 9 months ago
ansuz 713fa9ef9a Merge pull request #525 from ma-neumann/master 9 months ago
ansuz 9a7681dc5d add a note about handling syntax errors in mermaid 9 months ago
ansuz ca2dc85b34 reduce eye-bleed with gentler styles for mermaid charts 9 months ago
yflory d41c362d46 Fix more errors after ot 9 months ago
yflory 5fe7f55f51 Fix share with a team when we don't have contacts yet 9 months ago
Martin Alexander Neumann e3f42cbc20 Allow config.defaultStorageLimit to be zero bytes 9 months ago
yflory a70233d492 Fix OT errors 9 months ago
ansuz 89262cd29e resolve silly conflict and merge staging 9 months ago
ansuz 44a3c72f13 Merge branch 'soon' into staging 9 months ago
ansuz de6594f4c9 add some XXX notes 9 months ago
ansuz 09bf0a54d8 add access control header 9 months ago
ansuz e527867e2e queue blob and channel deletions per-user 9 months ago
Weblate ebd4998edd Translated using Weblate (Catalan) 9 months ago
yflory 62fde59a89 temp 9 months ago
ansuz bd3e18d1a8 only cache a user's pinned channels if there are no errors when loading them 9 months ago
yflory cba8f5fce6 Merge branch 'staging' into stoppegp-code-authorcolors-PR 9 months ago
yflory ee3adccafc Merge branch 'code-authorcolors-PR' of https://github.com/stoppegp/cryptpad into stoppegp-code-authorcolors-PR 9 months ago
ansuz bef18a9320 hotfix 9 months ago
ansuz 7fac997e93 increase some file storage timeouts related to streams 9 months ago
ansuz 95965c1dee keep a parallel implementation of the pin loader to validate the new one 9 months ago
stoppegp 272c1007db authorcolor storage optimizations 9 months ago
stoppegp 56031a5c14 authorcolor storage optimization, add undefined checks before access, code styling 9 months ago
stoppegp 79325b8cca bugfix authorcolors: wrong end position on multiline edit 9 months ago
stoppegp 5f7bc9fca5 fix remove author color button when nothing is selected 9 months ago
yflory 1551a1b969 lint compliance 9 months ago
yflory 0ccc57f6f7 Protect current pad keys in the unsafe iframe 9 months ago
yflory 43904df0fb Display properties modal in the secure iframe 9 months ago
stoppegp 8696ecc692 add 'remove authorcolors' button 9 months ago
stoppegp 45b5eb7cac add basic author colors to code app 9 months ago
ansuz dca2707ae3 guard against incorrect types in /auth/ 9 months ago
yflory ca8c50ca37 Merge branch 'staging' into secureiframe 9 months ago
yflory e5bcaeedce Fix regression in the access modal 9 months ago
yflory 4f838777ab Merge branch 'q' into secureiframe 9 months ago
yflory 2aec7c2569 Open the access modal in the secure iframe 9 months ago
yflory 6103588faa Fix access modal bug introduced with the latest commit 9 months ago
yflory 19a0dafbe1 Merge branch 'q' into secureiframe 9 months ago
yflory 62187d855f lint compliance 9 months ago
yflory fc2cd19821 lint compliance 9 months ago
yflory 894a355f0a Merge filepicker and share iframes 9 months ago
yflory 4672bf794b Make sure we don't try to get metadata for blobs 9 months ago
yflory 9de345044a Prompt anonymous users to login if they should create a checkpoint in oo 9 months ago
ansuz e1069b0abb 3.15.0 changelog 9 months ago
ansuz 2e290a6667 last minute fixes and notes 9 months ago
ansuz a57bf94058 remove XXX note 9 months ago
ansuz 9ef755c07b update minor version and codename 9 months ago
yflory a834d653aa Fix download button size in mediatag preview 9 months ago
David Benqué bee2a44c5a Merge remote-tracking branch 'origin/staging' into staging 9 months ago
David Benqué f95c385c82 filepicker button 9 months ago
David Benqué e1be241406 filepicker thumbnail style 9 months ago
ansuz 7434c344f5 Merge branch 'soon' into staging 9 months ago
ansuz 7027033ed1 Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 9 months ago
ansuz c53675c9d5 WIP worker rpc call queue 9 months ago
Weblate 3f126f1bec Translated using Weblate (German) 9 months ago
Weblate 6ea958c7bd Translated using Weblate (Finnish) 9 months ago
yflory f5de72dcb2 Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 9 months ago
yflory 15d86c9bd1 Merge branch 'pcs' into staging 9 months ago
yflory 8370f3121c Remove XXX 9 months ago
ansuz d802173325 remove an invalid line 9 months ago
ansuz 90518224fe Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 9 months ago
ansuz 5196440e65 guard against a typeError when httpSafeOrigin is not defined 9 months ago
yflory 3aefc3a893 Fix icon size in the new pad modal 9 months ago
yflory 0f21f3118b Fix lightbox errors 9 months ago
yflory c29b7d3389 Fix reference error 9 months ago
yflory 20111e8416 Drop support for external URL as avatar 9 months ago
yflory 9da47ebac9 lint compliance 9 months ago
yflory 033b784576 Add max file size in whiteboard 9 months ago
yflory 175ee492a5 Merge branch 'staging' into lightbox 9 months ago
yflory 35394476a6 Improve scrollbars and modal size 9 months ago
ansuz 2b3f6e3464 Merge branch 'soon' into staging 9 months ago
ansuz 9a137e0025 Merge branch 'master' into soon 9 months ago
ansuz aace3c006c Merge branch 'csp-issue' into staging 9 months ago
ansuz 9fa93172cf tolerate trailing slashes in httpSafeDomain 9 months ago
Weblate 427389d1c7 Translated using Weblate (French) 9 months ago
Weblate 7ccfe7a673 Translated using Weblate (English) 9 months ago
yflory 18a4d2a72c Merge duplicated code between framework and whiteboard 9 months ago
yflory 7ca45eac72 Add preview for mermaid graphs in code 9 months ago
yflory 3bc32f6085 Start at the correct index when previewing multiple mediatags 9 months ago
yflory b4c61ee753 Fix preview modal not stopping keydown propagation in drive 9 months ago
yflory c6fa00b14c Add a start value to the new debug tool to simulate a user joining from a checkpoint 9 months ago
yflory ae78f0a5df Merge branch 'staging' into debugtime 9 months ago
yflory 5739773378 Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 9 months ago
yflory 902e9624e8 Don't send encrypted messages to chainpad 9 months ago
yflory 8a658336a7 Fix issues with the new debugging code 9 months ago
ansuz adfd1eaaa2 avoid a silly typeError in the event of an error 9 months ago
ansuz 3b05d24f10 wip csp issue 9 months ago
ansuz 2daf759299 remove clickable items from rendered mermaid 9 months ago
ansuz 110a26cc85 Merge branch 'soon' into staging 9 months ago
yflory d12fad3420 Fix kanban edit modal not locked on disconnect 9 months ago
yflory 572f81877f Fix reconnecting issues after EUNKNOWN and ENOENT 9 months ago
yflory c0f8129675 Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 9 months ago
yflory 32406a5995 Merge branch 'master' into staging 9 months ago
yflory 1169156e55 Replay history in order 9 months ago
Weblate 1f3e47e109 Translated using Weblate (Italian) 9 months ago
ansuz 0c65a69bab resolve merge conflict 9 months ago
ansuz 834c96a4fc update comments and remove some notes 9 months ago
ansuz ed5d97f899 WIP history-keeper fixes 9 months ago
yflory 572db00987 Preview multiple mediatags 9 months ago
yflory 81b460abd8 Open pdf in the file app 9 months ago
yflory d8d515a450 Add spinner while loading preview 9 months ago
yflory 6a10ec711a Preview mediatag in the drive and in diffMarked 9 months ago
yflory 817309d602 Move code from ui-elements to common-interface 9 months ago
yflory 179e7d68b4 Add new common-mediatag file 9 months ago
yflory b27eb956df Merge branch 'staging' into lightbox 9 months ago
ansuz eddbe80eef Merge branch 'soon' into staging 9 months ago
ansuz 1a825ad664 Merge branch 'master' into soon 9 months ago
ansuz ad118222d0 add a new test to send a stream of data to a mailbox 9 months ago
ansuz 3d8e78a268 fix overly broad config interpretation for daily check 9 months ago
ansuz e6ec891d9a improve admin rpc for diagnosing memory used by the cache 9 months ago
ansuz 77961e3954 trim lookup tables for channels without checkpoints 9 months ago
yflory 44d05d1756 Merge branch 'master' of github.com:xwiki-labs/cryptpad 9 months ago
yflory 8210ae3a09 Fix duplicate text bug #352 9 months ago
ansuz b588e8ed43 Merge branch 'soon' into staging 9 months ago
ansuz a6943b12b4 Merge branch 'soon' 9 months ago
Weblate 4963806cf2 Translated using Weblate (Italian) 9 months ago
yflory b4b1ad893f Disable cursor position in rich text (possible cause of duplicate text) 9 months ago
yflory 938e5c5cb1 Merge branch 'staging' into lightbox 9 months ago
yflory e5d4dbea30 Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 9 months ago
yflory acada4c733 lint compliance 9 months ago
yflory 75e5d3cc42 Move the mediatag and avatar code outside of common-ui-elements 9 months ago
Weblate 80ac7c8a96 Translated using Weblate (Italian) 9 months ago
Weblate 0ff883f8b2 Translated using Weblate (Italian) 9 months ago
ansuz db9eaa8d4f Merge branch 'soon' into staging 9 months ago
ansuz 6d0dee979a allow admins to inspect index/metadata caches 9 months ago
ansuz c8c98b5747 Merge branch 'soon' of github.com:xwiki-labs/cryptpad into soon 9 months ago
Weblate 4ce980f406 Translated using Weblate (Spanish) 9 months ago
Weblate 2df7c62613 Translated using Weblate (Italian) 9 months ago
Weblate b4d61f0519 Translated using Weblate (Swedish) 9 months ago
ansuz cbd3547814 remove log statements 9 months ago
ansuz 9058a59555 reassign db tasks if the responsible worker fails 9 months ago
ansuz 172823c954 lint compliance 9 months ago
ansuz b5649707d1 export 'tryParse' command 9 months ago
ansuz 64b0879984 improve logging for parse errors 9 months ago
ansuz 3f86b6141e rename worker processes 9 months ago
ansuz d8a88cb4ca run expiration tasks in a worker instead of the main process 9 months ago
ansuz b0179eaad9 drop XXX note 9 months ago
ansuz 9dbd32758a improve worker logging and move blob deletion to worker processes as well 9 months ago
yflory 7047abe407 Fix cache issues with mermaid 9 months ago
yflory 812d8f8770 Deprecate 'skip PCS' feature 9 months ago
yflory 05cb082e2f Stop refreshing the usage bar if the tab is not visible 9 months ago
yflory 980baf9ce3 Fix usage bar in drive and read only teams 9 months ago
yflory 9a4da6e6e0 Remove console log 9 months ago
yflory 6cde052a46 Update mermaid 9 months ago
ansuz 5f69fc18d0 suppress some noisy errors 9 months ago
ansuz 5f2d7c8dcf increase worker rpc wait time before timeout 9 months ago
ansuz 33e8e65507 handle errors in the server's workers 9 months ago
ansuz a4c8039cc7 improve error handling with rpc response API 9 months ago
ansuz cb53bd1c15 lint compliance 9 months ago
yflory 7b82d9dba4 Remove/fix XXX 9 months ago
ansuz 72ccd55e83 use latest chainpad-listmap 9 months ago
ansuz c0bb5c6427 use latest chainpad 9 months ago
yflory c7688b7c8c Merge branch 'duplicateText2' into staging 9 months ago
yflory e15196bd53 Merge branch 'reconnect' into staging 9 months ago
yflory cad12ab31f Merge branch 'soon' into staging 9 months ago
yflory 5248c81b65 Fix opening a text blob in the code editor 9 months ago
yflory 7b15775d75 Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 9 months ago
yflory ed722f7385 Fix properties and access modal being opened multiple times at once 9 months ago
yflory 36d7bb031f Merge branch 'soon' of github.com:xwiki-labs/cryptpad into soon 9 months ago
Weblate 517e11c994 Translated using Weblate (Italian) 9 months ago
Weblate 1d6c108cfd Translated using Weblate (German) 9 months ago
yflory 86959bc048 Add Hindi translation 9 months ago
ansuz 14feef1757 calculate pin list checksums in a worker 9 months ago
ansuz 51e6fe1cce Merge branch 'scaling-file-size' into soon 9 months ago
ansuz c4f3026331 don't bother sending IS_CHANNEL_PINNED requests 9 months ago
ansuz 70a0d4efb4 move more database reads into the database worker 9 months ago
David Benqué 01cdac21cc remove // XXX related to Whiteboard keys 9 months ago
Weblate 963da1c145 Translated using Weblate (French) 9 months ago
Weblate c9fa814e00 Translated using Weblate (English) 9 months ago
David Benqué b20beee2dc remove // XXX related to translation keys 9 months ago
yflory d7b2876711 Fix UI for brush settings in whiteboard 9 months ago
yflory c92db4fd46 Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 9 months ago
yflory f780087ac7 Better UI for whiteboard controls 9 months ago
Weblate 67c933622d Translated using Weblate (French) 9 months ago
Weblate 9521b9dd37 Translated using Weblate (English) 9 months ago
Weblate 0453ec60e8 Translated using Weblate (Swedish) 9 months ago
Weblate 008bc42a63 Translated using Weblate (Japanese) 9 months ago
Weblate 4add675921 Translated using Weblate (Italian) 9 months ago
Weblate 454d951621 Translated using Weblate (German) 9 months ago
ansuz bc13a21796 give fileStreams a little bit more time before closing them 9 months ago
ansuz 4ba36a9173 load user pins in the database worker 9 months ago
yflory fbdb8e547e Merge branch 'soon' into staging 9 months ago
yflory dcfd9c5a73 Smaller palette in whiteboard 9 months ago
ansuz 50e8893b24 move the 'getOlderHistory' call into the database worker 9 months ago
ansuz 320778f54f use latest chainpad-server 9 months ago
ansuz 73f3933e03 Merge branch 'soon' of github.com:xwiki-labs/cryptpad into soon 10 months ago
ansuz 471e374533 compute metadata in the same child process that builds indexes 10 months ago
ansuz 479b76f848 lint compliance 10 months ago
David Benqué bad3ae3efd move copy key button 10 months ago
ansuz b134799449 Merge branch 'soon' of github.com:xwiki-labs/cryptpad into soon 10 months ago
ansuz c2fcba31e3 remove an XXX 10 months ago
ansuz 967ca6afa9 suppress ENOENT errors when checking empty chat channels 10 months ago
ansuz 40251948d4 check authenticated rpc signatures in separate threads 10 months ago
yflory a6f8160ab4 Merge branch 'soon' of github.com:xwiki-labs/cryptpad into soon 10 months ago
yflory ba4614f0e8 Fix a type error in onlyoffice 10 months ago
yflory facc6d877a Remove hardcoded translation key 10 months ago
ansuz 0e09c73a60 Merge branch 'master' into soon 10 months ago
ansuz a2facc4a0d Merge pull request #513 from 7adietri/patch-1 10 months ago
ansuz e128683ffb call back with errors instead of throwing within the Storage module 10 months ago
ansuz 1240f4a2a5 Merge branch 'master' into soon 10 months ago
ansuz 90899aa2f4 start reviewing and removing XXX notes 10 months ago
yflory 64596b8097 lint compliance 10 months ago
yflory fe14399e67 Merge branch 'whiteboard' into soon 10 months ago
yflory ae1b59b2c1 Fix canvas initial size 10 months ago
yflory 24e474e688 Big canvas 10 months ago
yflory 9aaddea148 Add edPublic to user profiles 10 months ago
yflory e74f713983 Merge branch 'soon' of github.com:xwiki-labs/cryptpad into soon 10 months ago
yflory a4d1b47a4c Fix 'readonly' mode after disconnection in onlyoffice 10 months ago
yflory 2c4c18bef6 Merge branch 'soon' into staging 10 months ago
ansuz 2ef0af29e5 guard against index worker rpc responses for a txid with no supplied callback 10 months ago
ansuz cc335a54b1 update chainpad-server to latest version 10 months ago
ansuz ee8d5c9c36 fix the premiumUploadSize validation to check the correct attribute 10 months ago
yflory 2fe417bf52 Send contacts public data to accounts 10 months ago
yflory dc0b26da0b Merge branch 'soon' into staging 10 months ago
yflory de456cd70a Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 10 months ago
yflory 8e8b371690 Fix issues with trim history in spreadsheets 10 months ago
Weblate f3e6c6c36d Translated using Weblate (Italian) 10 months ago
yflory 7ddc472658 Restore Dutch translation 10 months ago
Weblate 6d305cd411 Translated using Weblate (Portuguese (Brazil)) 10 months ago
Weblate 74743610d5 Translated using Weblate (Polish) 10 months ago
Weblate e03edf2762 Translated using Weblate (Spanish) 10 months ago
Weblate 4cc3091270 Translated using Weblate (Italian) 10 months ago
Weblate f672abf939 Translated using Weblate (Swedish) 10 months ago
Weblate de1f57ca63 Translated using Weblate (Japanese) 10 months ago
Weblate 43c8d52352 Translated using Weblate (Norwegian Bokmål) 10 months ago
Weblate 638f4ff955 Translated using Weblate (Russian) 10 months ago
Weblate 014115fbc5 Translated using Weblate (English) 10 months ago
Weblate 399f4a27a6 Translated using Weblate (Chinese (Simplified)) 10 months ago
Weblate 4d9e2cd740 Translated using Weblate (Romanian) 10 months ago
Weblate 4e16bc2dcc Translated using Weblate (Swedish) 10 months ago
Weblate 46aa0c633d Translated using Weblate (Italian) 10 months ago
Weblate ca4724f8e0 Added translation using Weblate (Swedish) 10 months ago
Weblate 0e18d1f8d6 Translated using Weblate (Italian) 10 months ago
Weblate f7ee77749a Translated using Weblate (Italian) 10 months ago
ansuz a8b2c03dbf avoid merge conflict with weblate 10 months ago
Weblate 4a37a386e7 Translated using Weblate (Italian) 10 months ago
ansuz 552966a1f6 add Svenska translation 10 months ago
Alexander a190af079d Add shutdown() to non-disk logger 10 months ago
ansuz ed63d98ab0 Merge branch 'scaling-index' into soon 10 months ago
ansuz bf2e5aeca1 add an admin panel block to display the open file count 10 months ago
ansuz f9706623d5 Merge branch 'soon' into staging 10 months ago
ansuz 605aae222c update less devDependency to match client version 10 months ago
ansuz 590e249500 resolve merge conflict 10 months ago
ansuz 31c7cecaf9 add a few checks for safety 10 months ago
ansuz cea9705bbe Merge branch 'aggressive-stream-closing' into staging 10 months ago
ansuz e78b6e679d Merge branch 'soon' into staging 10 months ago
yflory dc840c2a70 Fix checkboxes in the markdown renderer #511 10 months ago
ansuz faa133aab8 remove an XXX 10 months ago
yflory a13561eb8d Fix checkboxes in the markdown renderer #511 10 months ago
ansuz d386e223e4 simplify open/close of writeStreams 10 months ago
ansuz 32cd0f3c4d increase timeout value from 15s to 45. ought to be enough for anybody 10 months ago
ansuz 05a4e86cdb close streams whenever we finish using them. time out if necessary 10 months ago
ansuz 4522ffa18a compute indexes in child processes 10 months ago
yflory 12520ecb53 Remove unnecessary log 10 months ago
yflory 1ea00cfdfa Remove invalid 'DISCONNECTED' message in drive history mode 10 months ago
ansuz d1b16af160 update changelog 10 months ago
ansuz 1200834800 Merge branch 'soon' 10 months ago
Weblate d28039ff5b Translated using Weblate (German) 10 months ago
yflory 4cada76c7e Merge branch 'ooBuild' into soon 10 months ago
ansuz 2e9052cb42 Merge branch 'scaling2' into soon 10 months ago
ansuz f8ad649b45 [style] bail out early to avoid nesting 10 months ago
yflory f0ff7d1968 Merge branch 'soon' into ooBuild 10 months ago
yflory 3b82c5504e Refocus the iframe when closing the lock modal 10 months ago
Weblate 0a8364cec4 Translated using Weblate (French) 10 months ago
Weblate 02540d2a23 Translated using Weblate (English) 10 months ago
yflory dba74df532 Fix lock modal 10 months ago
yflory e1119290df Better lock message 10 months ago
yflory 7bb59b518f Abort instantly when you want a lock but a global lock exists 10 months ago
yflory 7254d093a3 Fix checkpoint issues in onlyoffice 10 months ago
yflory 4484cda9b6 Use a new path to force a cache reload on onlyoffice 10 months ago
yflory 70aad68d07 New OnlyOffice build 10 months ago
yflory dee8007cc4 Fix worksheet IDs in onlyoffice 10 months ago
ansuz 019e5e708b wrap workers in a function scope and add a validateMessage method to HK's Env 10 months ago
ansuz 5467e1ffac replace ad-hoc response handler with Util.response 10 months ago
ansuz 9e85a1411e abstract the logic around worker choice out of message handling 10 months ago
ansuz 631ea54b49 lint compliance 10 months ago
ansuz 67dd4a608c Merge remote-tracking branch 'origin/scaling' into scaling2 10 months ago
ansuz 662ea86174 update chainpad-server dependency 10 months ago
yflory f7e0d03898 Merge branch 'staging' into ooBuild 10 months ago
ansuz b2fcde87d8 implement an admin command to update the default storage limit without a restart 10 months ago
yflory 923616aef0 Use each process in order instead of using a random one 10 months ago
ansuz f345998956 fix undefined reference 10 months ago
yflory fb0eb1b20c Use more subprocesses 10 months ago
yflory 0d636dabc9 Check signature for history keeper in a different process 10 months ago
ansuz e90ac69714 update changelog for 3.14.0 10 months ago
ansuz dc586f4887 Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 10 months ago
ansuz 30f17040ac close streams when possible, simplify some code, enforce asynchrony 10 months ago
ansuz e2c748b6c7 use newest chainpad-server 10 months ago
ansuz 54420109ac update the open file limit in the example systemd service file 10 months ago
ansuz 2a9780abcf lint compliance 10 months ago
yflory ab6ccfe1f6 Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 10 months ago
yflory cdcc52224a Reload when the API version changes 10 months ago
Weblate e32bfa2747 Translated using Weblate (German) 10 months ago
yflory 0074824a0a Fix XXX in jKanban 10 months ago
yflory fbae166702 Remove XXX 10 months ago
yflory abc27295ab Fix XXX in kanban 10 months ago
yflory 747657a559 Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 10 months ago
yflory 15925f625d Fix keyboard shortcuts in kanban edit modal 10 months ago
yflory d69ad7c0e6 Send the error to chainpad when a patch is not sent 10 months ago
yflory 27c1291182 Fix duplicate text bug on reconnect or ACK timeout 10 months ago
ansuz b5560c279b label some issues to fix before the release 10 months ago
ansuz 0438c783b2 Merge branch 'soon' into staging 10 months ago
yflory 7b5a15cef7 Merge branch 'staging' into reconnect 10 months ago
yflory 578042154e Better reconnect after invalid lastKnownHash 10 months ago
ansuz a2b6501adb update package and footer version and add a dependency 10 months ago
ansuz 5d59b55799 lint compliance 10 months ago
Weblate 3a6884f566 Translated using Weblate (English) 10 months ago
Weblate 1e545049b5 Translated using Weblate (French) 10 months ago
David Benqué ed4de6a007 remove // XXX related to translation keys 10 months ago
Weblate d2135e5533 Translated using Weblate (English) 10 months ago
Weblate 139725e4fa Translated using Weblate (French) 10 months ago
yflory d53e9cff02 Fix 'clear filter' button 10 months ago
yflory a5aac22374 Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 10 months ago
yflory 729a167e59 Confirm button in kanban edit modal 10 months ago
David Benqué 5bbfb4c71f Merge branch 'kanban_UI_fixes' into staging 10 months ago
David Benqué f6f499ee31 add padding to kanban container 10 months ago
David Benqué f8b8f09a74 highlight trash on drag 10 months ago
David Benqué fcb67d5b50 add alt text to edit card/board buttons 10 months ago
David Benqué 7bb0263f8f make tag input 100% width 10 months ago
ansuz 2d47e7e2cf implement an admin rpc to fetch ulimit data 10 months ago
ansuz 81c7416095 lint compliance 10 months ago
ansuz f644c24362 add an admin-rpc to count the number of open files 10 months ago
ansuz 1230057970 disable IS_PAD_PINNED 10 months ago
David Benqué e17544d168 add code block width 10 months ago
David Benqué a442f77851 style code blocks 10 months ago
ansuz 96b92f472c Merge branch 'soon' of github.com:xwiki-labs/cryptpad into soon 10 months ago
ansuz e1a0daac9c possible server fixes 10 months ago
ansuz bc034d95a3 enforce asynchrony in 'gethistoryOffset' 10 months ago
yflory b13f56247f Fix trim history 10 months ago
David Benqué cd5c9739ad move temp translation key 10 months ago
David Benqué a5767d8b68 fix hover color of markdown toolbar buttons 10 months ago
yflory 7c76e4544a Fix trim history 10 months ago
David Benqué f38caa0af5 move tag filter clear and add text 10 months ago
yflory e837853e04 Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 10 months ago
yflory 2f80a32e15 Fix cursor not removed when another user leaves the kanban 10 months ago
yflory 1e877e1980 Keyboard shortcut to close kanban edit modal 10 months ago
yflory 62a12e8825 Cache and throttle the markdown rendering in kanban 10 months ago
ansuz 9cb92d2e6f Merge branch 'soon' into staging 10 months ago
ansuz e80a81a2bb Merge branch 'master' of github.com:xwiki-labs/cryptpad 10 months ago
ansuz d4038344ba stop iterating over tasks if you can't read the root directory 10 months ago
yflory 77894b5336 Fix relative links in kanban 10 months ago
yflory 7cd3701f26 Debug reconnect issues 10 months ago
David Benqué 291a84723f display cursors inline in modal notification. 10 months ago
David Benqué 928fe6c3dc change trash color 10 months ago
David Benqué 1d0e9f7392 adjustments to card body rendered markdown 10 months ago
David Benqué 6347b6193b align edit button to the top 10 months ago
yflory a0ab44c82d Use codemirror settings in kanban 10 months ago
yflory 7a91a6d606 Fix timeout bug 10 months ago
yflory 17be91f640 Fix flash of previous content in kanban codemirror 10 months ago
yflory 441b31969c Preserve horizontal scroll in kanban 10 months ago
David Benqué 5cc0161e45 fade [+] button in column footers 10 months ago
yflory 718cdee907 Check if the worker is available when focusing a tab again 10 months ago
David Benqué 7d0d701942 loop for column colors 10 months ago
yflory 21c3abde6f Fix kanban tags autocomplete 10 months ago
yflory 2f7e8fba79 Fix deletion from anon drive 10 months ago
yflory e31ddda8f5 Show all cards when all the selected tags have been deleted 10 months ago
ansuz e7baa79000 Merge pull request #503 from 101100/fix-warning 10 months ago
Jason Heard 9b3013278b Check httpSafeOrigin in config variable 10 months ago
yflory ab2337e2f6 Unselect tags that don't exist anymore on redraw 10 months ago
yflory 8bc8a6b08f Add link support 10 months ago
yflory f316c6b85a Fix losing scroll position on remote changes 10 months ago
yflory 4e6e6ffb01 Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 10 months ago
yflory c178d5b54e Don't remember tag filter and view mode 10 months ago
yflory 342f0f741b Fix type error 10 months ago
yflory e9e65ae23b Fix minimum size of a column 10 months ago
yflory 84e3ed84a6 Resize vertical on edit modal codemirror 10 months ago
yflory 01ce4bcb95 Fix new item removed on remote change 10 months ago
yflory bf02f1f97e Fix outline in kanban 10 months ago
ansuz f2729ee9e2 Merge branch 'soon' into staging 10 months ago
yflory 31c994ae2f Fix blur 10 months ago
yflory e93339769f Fix tags UI 10 months ago
yflory f478c313c8 Fix palette for default color 10 months ago
yflory da7e00a9e6 Fix 'null' displayed in corner popup on Edge 10 months ago
yflory 2443c52d42 Fix checkmark in kanban markdown 10 months ago
yflory f2b3a711eb Fix uppercase tags in kanban 10 months ago
yflory 195ee1f1e0 Merge branch 'communities-kanban' into staging 10 months ago
yflory 6e9441ed39 Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 10 months ago
yflory 3a5e9a25a7 Fix 3px margin error 10 months ago
ansuz 6b657c47ce change an XXX to a TODO 10 months ago
Weblate 51afa9207f Translated using Weblate (German) 10 months ago
yflory c8f0c99ddc Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 10 months ago
ansuz 45601068ae lint compliance 10 months ago
ansuz 11369e553d downgrade an 'XXX' note to a 'TODO' 10 months ago
yflory 315704a1b8 Redraw access modal on metadata change 10 months ago
ansuz b9a07ca650 diagnose some unhandled errors in rpc tests 10 months ago
ansuz 461c809e4f update chainpad-server dependency 10 months ago
ansuz 4e8ebac3c1 guard against the possible non-existence of metadata for a channel 10 months ago
yflory 4690392cd9 Display other users' cursor 10 months ago
yflory 0674f410f5 Merge branch 'staging' into communities-kanban 10 months ago
yflory 6c5b3c8d14 lint compliance 10 months ago
yflory 56f5a75532 Drag and scroll inside boards 10 months ago
yflory b85d0fc9df Merge branch 'staging' into communities-kanban 10 months ago
yflory b768ac5a4a Remove debug logs 10 months ago
yflory 1402dc7170 Fix kanban edit modal issues 10 months ago
yflory 6d0154fb4d Use jscolor contrast values 10 months ago
yflory 03f65438fd Fix UI issues 10 months ago
yflory d73b32a05e Make sure users can't block themselves with SF allow list 10 months ago
yflory 835f531c9e Fix avatar issues 10 months ago
ansuz bb23b1249e update changelog for the northern white rhino release 10 months ago
ansuz e26ed7bb83 fix mermaid scroll jank 10 months ago
ansuz d4ca5e7d34 Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 10 months ago
ansuz b32f3e34af fix local worker debugging on firefox 10 months ago
ansuz 140c8f1049 update chainpad-server dependency 10 months ago
David Benqué 5a88f6c526 change order of icons in context menu 10 months ago
Weblate f60a9dc575 Translated using Weblate (French) 10 months ago
Weblate 72acee6df8 Translated using Weblate (English) 10 months ago
yflory 58d69f9d74 Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 10 months ago
yflory 43787e1140 Allow list reconnect 10 months ago
Weblate 8295b89515 Translated using Weblate (English) 10 months ago
Weblate c4392a90f9 Translated using Weblate (French) 10 months ago
yflory 06ae508250 Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 10 months ago
yflory 53a6c3c334 Update unknown users in the access modal 10 months ago
Weblate bab3057f10 Translated using Weblate (English) 10 months ago
Weblate e0ce3121a5 Translated using Weblate (French) 10 months ago
Weblate b06143ae50 Translated using Weblate (French) 10 months ago
Weblate 6df1fb1d55 Translated using Weblate (English) 10 months ago
yflory dba2cb1860 Improve allow list UI 10 months ago
yflory 62725caace Fix allow list UI issues 10 months ago
ansuz 6d8cdca7a3 add 'rejected' field to metadata when getMetadata requests are blocked 10 months ago
David Benqué 330059e46e use a new key for log out everywhere 10 months ago
Weblate fdf4314d18 Translated using Weblate (German) 10 months ago
Weblate 6eea812ece Translated using Weblate (French) 10 months ago
Weblate dcb98a276d Translated using Weblate (English) 10 months ago
yflory bc6a329f06 lint compliance 10 months ago
yflory 7d19b9a205 Kanban migration to new format 10 months ago
yflory e139d46b26 Enable migration to new format 10 months ago
yflory fdea16d601 Tag filter 10 months ago
yflory 411b125b0f Fix board height 10 months ago
ansuz 6f7307c446 use loggedIn instead of accountName in the toolbar 10 months ago
ansuz e3042c94d8 make 'log-out-everywhere' end the local session as well 10 months ago
ansuz 9d8bb43d03 finally untangle metadata and index caches 10 months ago
yflory bd7d9a93c2 Add tags filter 10 months ago
yflory fbf4434fc4 Board color 10 months ago
yflory 0975f253ab Display body and tags 10 months ago
ansuz 32d769447a begin standardizing our method of streaming lines from files 10 months ago
ansuz 35eca2c5d2 pull file streaming out into its own file, leave a few notes 11 months ago
ansuz ccd6e1d6df lint compliance, simplify a test condition 10 months ago
ansuz ce6cf6f727 revert some changes that couldn't have possibly worked 10 months ago
ansuz c53f378582 Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 10 months ago
yflory 97e45d91ee Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 10 months ago
yflory 224b207a68 Fix migration from very old drive 10 months ago
yflory 04bd69f2f5 Add board button 10 months ago
yflory b31d024851 Horizontal scrollbar layout 10 months ago
yflory ea154e9ecc Add item button 10 months ago
yflory 9b54659e86 Fix default colors 10 months ago
yflory c220a7ecf4 Add colors to kanban 10 months ago
yflory 8c342862e7 Fix removing a board in kanban 10 months ago
ansuz b126e4456b simplify quota logic 10 months ago
ansuz 53ed247bc2 expose maxUploadSize and premiumUploadSize via /api/config 10 months ago
ansuz 50b9c27dcb finalize one more test 10 months ago
ansuz 8fbb32c5d7 add a few more checks to the rpc tests 10 months ago
ansuz 170aa6d47e clean up a few tasks related to allow lists 10 months ago
yflory 05708f6166 Make quick edit realtime too 10 months ago
yflory fd5e3b0fd5 lint compliance 10 months ago
yflory c42a7bff6f Make kanban edit modal realtime 10 months ago
yflory c8cca35073 Add edit modal in kanban 10 months ago
ansuz 92325a27f7 Merge branch 'soon' into staging 10 months ago
ansuz 57eb61aa17 update footer and package version for next release 10 months ago
ansuz e633f3b445 ignore some local scripts 10 months ago
yflory 8cb6e3c1b4 Delete boards 10 months ago
yflory 0fc5a456df Fix drag&drop issues 10 months ago
David Benqué 0a548cbedf Merge remote-tracking branch 'origin/staging' into staging 10 months ago
Weblate 4cdee4e973 Translated using Weblate (English) 10 months ago
Weblate e240c3d299 Translated using Weblate (French) 10 months ago
Weblate cd22968467 Translated using Weblate (Catalan) 10 months ago
ansuz 7f2d8e85b8 Merge branch 'small-config' into staging 10 months ago
yflory a901905ae5 Better move function 10 months ago
David Benqué 30f90876dd remove tooltip on usergrid avatars 10 months ago
yflory 1b2bb5b693 Fix concurrent move 10 months ago
yflory 889b18a1ff Drag to remove 10 months ago
David Benqué b42fbd4ee0 add explanatory text for owners tab in access modal 10 months ago
David Benqué b8c4e5d254 fix size of arrow button in access modal 10 months ago
ansuz ccd336d968 Update bug_report.md 10 months ago
ansuz 2f00ff3278 simplify check-in deactivation 10 months ago
ansuz 92896fb919 rearrange a big part of the example config file 10 months ago
ansuz c371a257bf WIP example config reorganization 10 months ago
yflory 0b5f76d471 Fix issues with the new structure and better restoreCursor 10 months ago
yflory 2e81605c95 New kanban structure with drag&drop support 10 months ago
ansuz e70c3ff0ab add some default config values and warn if provided values seem incorrect 10 months ago
David Benqué 8fd8881085 remove XXX relating to translation keys 10 months ago
ansuz 03ff9bd0d7 start deprecating usage of myDomain 10 months ago
ansuz 0bbf35205b Update bug_report.md 10 months ago
ansuz 8219e5a903 Update issue templates 10 months ago
ansuz de6b9e2228 apply some defaults when loading the config 10 months ago
ansuz 94d8e7f2c2 provide a consistent type for 'customLimits' 10 months ago
ansuz 3cf09924ae fix default CSP headers 10 months ago
David Benqué e8fac0333a change order of contextual menu in Drive 10 months ago
yflory 60862d9f87 Fix isLoggedIn issue 10 months ago
ansuz 8d509fd6d5 Merge branch 'staging' into small-config 10 months ago
ansuz c26560e3f9 commit stashed admin-rpc refactor 10 months ago
ansuz 02fc343727 Merge branch 'staging' into small-config 10 months ago
ansuz 5eefc6d0aa Merge branch 'bigger-uploads' into staging 10 months ago
ansuz 74771f13f5 implement caching for /api/config responses 10 months ago
ansuz 0989595358 lint compliance 10 months ago
ansuz d2bae175c4 Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 10 months ago
ansuz 3df47a1415 continued WIP config reorganization 10 months ago
ansuz 294a444603 WIP removing defaults from the example config file 10 months ago
ansuz 08941fa85b remove log statements from login process 10 months ago
yflory e18c224db4 Faster sort function in drive 10 months ago
yflory e84503bd52 fixFiles optimizations 10 months ago
ansuz 59ad80d7f1 support larger upload sizes for users with premium accounts 10 months ago
ansuz 87ef2b0899 Merge branch 'user-admin-logout-everywhere' into staging 10 months ago
ansuz 3f2ebb773b merge master back to staging 10 months ago
ansuz 542150b775 merge server components from staging 10 months ago
ansuz f951951077 merge communities-allow-list and lint compliance 10 months ago
yflory 43693b45f0 Merge branch 'master' of github.com:xwiki-labs/cryptpad 10 months ago
yflory ca10f15fe4 Merge branch 'soon' 10 months ago
yflory 4f9862de71 Fix trim history when owned by a team 10 months ago
ansuz f0e5fc7614 merge some missing code from communities-allow-list 10 months ago
ansuz ad08fe0c08 silence a few more routine websocket errors 10 months ago
ansuz 9c4693365a logout-everywhere from the userAdmin menu 10 months ago
ansuz 492e9496c8 Merge branch 'soon' of github.com:xwiki-labs/cryptpad into soon 10 months ago
ansuz 21cf2b70c5 update changelog for v3.12.0 (Megaloceros) 10 months ago
David Benqué e328ae97fd change owner icon 10 months ago
yflory 8eaf197b5b Disable 'make a copy' for spreadsheets in the drive 10 months ago
yflory 99f04c3d6f Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 10 months ago
yflory f103a0fb08 Fix dropping files in non-ROOT categories in the drive 10 months ago
Weblate 20b591a39c Translated using Weblate (German) 11 months ago
yflory 6e6ba73eca Remove XXX 10 months ago
yflory c6deabe79c Fix order of 'copy' button in the toolbar 10 months ago
yflory 59d23ef5ff Fix issues with trim history button 10 months ago
ansuz 6f742fc295 WIP changelog 11 months ago
ansuz 021a6b361f remove a bogus XXX note 11 months ago
ansuz e871e1f1e2 bump footer version to 3.12.0 (Megaloceros) 11 months ago
ansuz 033bd361f9 add backwards compatibility with older servers 11 months ago
ansuz cac114bb52 merge staging into pending allow-list work 11 months ago
ansuz 668ea95de2 Merge branch 'soon' into staging 11 months ago
ansuz e8142d983b fix an invalid use of dropChannel 11 months ago
ansuz 386add7c58 fix an invalid use of dropChannel 11 months ago
ansuz 76d369bc83 Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 11 months ago
ansuz 33c12c27a1 ignore successive metadata errors with the same message 11 months ago
ansuz 7ee8778210 kick unauthorized users from restricted channels 11 months ago
Weblate 78c292bc2e Translated using Weblate (Italian) 11 months ago
Weblate e292a0b295 Translated using Weblate (French) 11 months ago
Weblate 01ba2c4d40 Translated using Weblate (English) 11 months ago
Weblate 316d2a4689 Translated using Weblate (German) 11 months ago
yflory ff97c2cf63 Add (copy) to the title for copied pads 11 months ago
ansuz 77785e79af Merge branch 'communities-allow-list' of github.com:xwiki-labs/cryptpad into communities-allow-list 11 months ago
ansuz 075eddb1db guard against a typeError if there are no owners 11 months ago
ansuz fe0e02a421 restrict access to metadata 11 months ago
yflory cebb2d3900 Merge branch 'communities-allow-list' of github.com:xwiki-labs/cryptpad into communities-allow-list 11 months ago
yflory 0990148810 Fix team ownership in the access modal 11 months ago
yflory 3abe522a9f Make a copy 11 months ago
ansuz ad7096d754 Merge branch 'communities-allow-list' of github.com:xwiki-labs/cryptpad into communities-allow-list 11 months ago
ansuz 42ffb4efb6 Merge branch 'staging' into communities-allow-list 11 months ago
ansuz 791aad53f2 WIP allow list changes 11 months ago
yflory 2cceb54aac Fix cornerPopup timeout not always applied 11 months ago
yflory d0bce11c21 Fix errors when an anonymous user leaves the shared worker 11 months ago
yflory d3939e87b2 Fix readOnly mode in spreadsheets 11 months ago
ansuz 4fee896b5d update chainpad-listmap's dependency chain 11 months ago
ansuz c8b0907fbe Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 11 months ago
yflory e6c948febb Display ERESTRICTED error in the UI 11 months ago
yflory 866ce6fb7d Add translation key 11 months ago
yflory 5a189862eb Fix color 11 months ago
yflory 9d5bf7e1bb Disable allow list for files and shared folders 11 months ago
ansuz 597f417ad6 WIP block history requests that bypass JOIN restriction (where appropriate) 11 months ago
ansuz 79bc8830ef disable access restriction if all owners are removed 11 months ago
ansuz f579c9b059 semi-functional allow-list implementation in historyKeeper 11 months ago
yflory 521db379a0 Add access button in all the apps 11 months ago
yflory 620995506e lint compliance 11 months ago
yflory 43492b6df5 Add mute pad option 11 months ago
yflory b4ca18d7c2 Fix access modal bug 11 months ago
yflory 57c360ebf6 Refresh access modal when other tabs make changes 11 months ago
yflory bd0e4e66bb Fix allow list issues 11 months ago
yflory d0eb96815d Merge remote-tracking branch 'origin/communities-allow-list' into communities-allow 11 months ago
yflory e861cd3ab7 lint compliance 11 months ago
yflory 5f8ae61bd4 Add request access button to the access modal 11 months ago
yflory ef4188db5b Fix translation keys and fix UI 11 months ago
yflory 175423fee4 Add translation keys 11 months ago
yflory 17113f298c Display allow list state in access modal 11 months ago
ansuz 0edcdcb1b8 oops! fix a missing callback that's been here for a while... 11 months ago
ansuz 27e57e7af0 oops! fix a missing callback that's been here for a while... 11 months ago
ansuz 505e383f9f test restricted access from the command line 11 months ago
ansuz eac3e4cbcd sketch out some historyKeeper functionality related to access lists 11 months ago
ansuz d5e8fc35f7 Merge branch 'mkTimeout' into communities-allow-list 11 months ago
ansuz 156c37942d implement new metadata commands related to allow lists and mailboxes 11 months ago
yflory 04009bf9bb Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 11 months ago
yflory d48cecf28e Load mailboxes after teams 11 months ago
David Benqué 72564a7cfe Merge remote-tracking branch 'origin/staging' into staging 11 months ago
Weblate 70b87f31dd Translated using Weblate (French) 11 months ago
Weblate 7cf9758428 Translated using Weblate (English) 11 months ago
Weblate 9943e3be78 Translated using Weblate (Italian) 11 months ago
yflory d6c3ad1597 Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 11 months ago
yflory f04a6056ae Fix race condition with team access rights #497 11 months ago
yflory 3647d583f8 Add allowList tab 11 months ago
David Benqué 99949ce838 Merge remote-tracking branch 'origin/staging' into staging 11 months ago
David Benqué 1f99c185f6 add new error key 11 months ago
David Benqué 64964e1f80 remove XXXs related to keys 11 months ago
yflory 0a45d48710 Improve ownership UI 11 months ago
yflory 81a0bbb0ef Add owners tab 11 months ago
ansuz ecce654ca6 add 'resource:' to script-src to enable shared-worker debugging in firefox 11 months ago
ansuz a280cc85d2 Merge branch 'staging' into communities-allow-list 11 months ago
ansuz 626a031115 resolve conflict with master/staging 11 months ago
ansuz ff73e96cb8 reimplement the trim history fix from staging 11 months ago
ansuz a4be6185de merge staging and do a little lint compliance 11 months ago
ansuz f478ae725d Merge branch 'staging' into communities-allow-list 11 months ago
ansuz 79f1280cf3 Merge branch 'soon' into staging 11 months ago
ansuz b56367414b don't overwrite cached indices when new users join a channel 11 months ago
ansuz 253ea0d336 Merge branch 'staging' into communities-allow-list 11 months ago
ansuz e8949168ec lint compliance 11 months ago
ansuz 5dff6535ed add a simple guard against unparsed messages when trimming history 11 months ago
ansuz 8694c17023 sketch out metadata commands for allow lists 11 months ago
ansuz f86196e40a implement shared environment between historyKeeper and RPC 11 months ago
yflory 4ec6d8072c Merge branch 'staging' into communities-allow 11 months ago
ansuz 8700345ccc add missing connect-src directives to example nginx conf 11 months ago
ansuz 1fc8c1de16 add missing connect-src directives to example nginx conf 11 months ago
yflory de820dc0d1 Split properties modal into 2 modals 11 months ago
yflory ff895bbc82 Move inner files 11 months ago
ansuz d9ab8d3f62 lint compliance 11 months ago
ansuz 3f606d8c75 remove some duplicated code 11 months ago
ansuz d274be6de4 remove ancient import script 11 months ago
ansuz 725d10fc60 nest storage directory inside './lib' 11 months ago
ansuz 58193a1969 Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 11 months ago
ansuz 38c1700173 Respond to pinning RPCs as soon as possible 11 months ago
yflory 10f97354fe Fix text duplication bug #352 11 months ago
ansuz cded52f83f replicate existing pinned.load API correctly 11 months ago
yflory adc64f0c6e Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 11 months ago
yflory 1d24c85ca4 Add author and time of patches in debug app 11 months ago
ansuz 65f88617cf add support for an optional handler for each pin log 11 months ago
ansuz 2df65ed446 implement 'mkTimeout' method in common-util 11 months ago
ansuz a172bad30f drop unnecessary reference to 'window' in Util.throttle 11 months ago
ansuz 3dc789cbca replace pinned.js with low-profile streaming implementation 11 months ago
ansuz 609eddc9e1 reimplement the Pinned.load API 11 months ago
ansuz 2345323f0d lint compliance 11 months ago
ansuz 2d6626234b yet another async scheduler, this time supporting flexible runtime control flow based on declarative priority levels 11 months ago
yflory b7b2685f14 Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 11 months ago
yflory 723ed5248f Fix double tippy in drive elements 11 months ago
ansuz 4b66899956 Merge branch 'soon' into staging 11 months ago
ansuz 06d3ed93e8 finish changelog for 3.11.0 (LabradorDuck) 11 months ago
yflory f8ddb6b7bc Fix anchor link in code 11 months ago
yflory a6e3208dff lint compliance 11 months ago
yflory d58244b4c4 Support relative url and anchor url in code 11 months ago
yflory bba769ef77 Use throttle to link to support tickets 11 months ago
yflory a9e4af1d12 Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 11 months ago
yflory 5d1e1c975c Link to a support ticket 11 months ago
ansuz 672725629c Merge branch 'communities-trim' into merge-trim 11 months ago
yflory 093816cbb2 lint compliance 11 months ago
yflory 5a2b36d443 Merge branch 'soon' of github.com:xwiki-labs/cryptpad into soon 11 months ago
ansuz c9db3aae78 Merge branch 'staging' into soon 11 months ago
yflory 778be9446d Fix drive refreshing the UI twice at startup 11 months ago
yflory 415fc27539 Pin todo and trim its history 11 months ago
yflory 035707bd38 Fix freeze effect after typing a wrong password in login/register 11 months ago
yflory 72d41e34fe lint compliance 11 months ago
yflory 0617d9efc5 Change UI for account migration in settings 11 months ago
yflory b585cbba42 Improve trim history UI when completed 11 months ago
yflory 0f697ac865 Merge branch 'staging' into communities-trim 11 months ago
ansuz bd163e65f3 Merge branch 'staging' into soon 11 months ago
ansuz 93abdff397 set codename 'LabradorDuck' for 3.11.0 11 months ago
ansuz 9cac465b08 add missing font-awesome share icon to the safe link hint 11 months ago
ansuz 2a6afcc176 Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 11 months ago
ansuz 1be01c07ee fix bad copypaste in the example nginx file 11 months ago
yflory 7fdf67b438 Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 11 months ago
yflory 44de8a41a1 Improve confirm modal keydown listeners 11 months ago
Weblate 4f9d0d71f9 Update translation files 11 months ago
Weblate 8f06090626 Update translation files 11 months ago
Weblate fbf1e287e9 Update translation files 11 months ago
Weblate 985ed2505d Translated using Weblate (German) 11 months ago
Weblate 80ae86cda3 Update translation files 11 months ago
Weblate be929ecc8f Update translation files 11 months ago
Weblate ac39a97a0f Update translation files 11 months ago
Weblate 9c6109799e Update translation files 11 months ago
Weblate b73ef31a2c Update translation files 11 months ago
Weblate d372f436d8 Update translation files 11 months ago
Weblate 67a837349b Update translation files 11 months ago
yflory c0e9e86a9e Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 11 months ago
yflory 532333d427 Fix keyboard shortcut in confirm 11 months ago
ansuz 0e9ba3a88d Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 11 months ago
ansuz bc2febfb8b bump version, drop flow dependency, use newer chainpad-server 11 months ago
yflory a86469206e Fix horizontal scrollbar again 11 months ago
yflory 26d05920c2 Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 11 months ago
yflory bf7f2e5bcd Fix upload table horizontal scrollbar + add autoscroll 11 months ago
ansuz 1231cacef1 remove XXX 11 months ago
ansuz 0c2e5d14b3 Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 11 months ago
ansuz 63a36df39b push the pending upload table below the file upload dialog 11 months ago
yflory f9eea74b54 Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 11 months ago
yflory 6a365da3d8 Merge branch 'sidebarlayout' into staging 11 months ago
yflory 38ac562de2 Merge branch 'sidebarlayout' of github.com:xwiki-labs/cryptpad into sidebarlayout 11 months ago
yflory 3d2538c3ca Fix narrow sidebar in teams 11 months ago
ansuz cff663e876 Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 11 months ago
ansuz 57be324850 remove some XXX notes 11 months ago
yflory 4cd39f4f32 Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 11 months ago
yflory 40b144f87b remove XXX 11 months ago
David Benqué 1c9132adcb Merge remote-tracking branch 'origin/staging' into staging 11 months ago
David Benqué a328dc6c08 fix text input color in team chat 11 months ago
yflory a333531a36 Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 11 months ago
yflory b69261d119 Display the share modal after folder conversion to sf 11 months ago
ansuz 68a77885d4 suppress some routine server logs 11 months ago
yflory 830c401f50 Remove invalid option in context menu 11 months ago
yflory d0f8458dd2 Fix openincode in anon shared folders 11 months ago
ansuz 7f22da0f57 fix use of incorrect key format in admin-rpc 11 months ago
ansuz 86b9cc2de1 accidentally merge uncommitted work with staging, resolve merge conflicts 11 months ago
yflory 8afb0255c1 Fix team creation 11 months ago
yflory 09076f39ff Fix and reenable folder to SF conversion 11 months ago
David Benqué fbd8700a18 fix highlighting of support messages 11 months ago
yflory 403867d4cd Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 11 months ago
yflory 7ca92526e7 Remove 'Owned pads' category from the drive 11 months ago
yflory 243abc3e53 Change default settings for hidden hash 11 months ago
David Benqué 6c23afdcc6 fix max-width 11 months ago
David Benqué 961cb8e427 apply max-width only to settings 11 months ago
yflory 4d2538c796 Make hidden hashes disabled by default 11 months ago
ansuz 7000be0e70 clean up historykeeper 11 months ago
ansuz 802034616c centralize historykeeper-rpc interaction in rpc methods 11 months ago
David Benqué f23649c1ce remove obsolete keys 11 months ago
Weblate 1332f33a1a Translated using Weblate (French) 11 months ago
Weblate 49294fc0c3 Translated using Weblate (English) 11 months ago
yflory 7d0dbe5d09 Remove XXX 11 months ago
yflory 1a1620f76d Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 11 months ago
yflory ea5c6defe6 Fix import mediatag to the drive 11 months ago
David Benqué fede44c8e5 remove XXXs related to translation keys 11 months ago
David Benqué f9b101dcd9 use the same key to say 'don't show again' 11 months ago
Weblate 4cf834fb22 Translated using Weblate (English) 11 months ago
Weblate 164e5d63d2 Translated using Weblate (French) 11 months ago
Weblate 961f475dcd Translated using Weblate (Catalan) 11 months ago
David Benqué d9f2c90c25 fix color of expiration warning in sheets 11 months ago
yflory 9ba786df4c Fix avatar in notification history 11 months ago
David Benqué 0efbc77370 add max-width to settings 11 months ago
yflory 9e60c8eb93 Show more than 5 entries in the upload table 11 months ago
yflory 331054870f Better sidebar layout for mobile 11 months ago
yflory 12a45377e7 Fix password change issues with read-only pads or from the drive 11 months ago
yflory 086e500b8e Fix hidden hash error with password change 11 months ago
yflory 872896543f Merge branch 'communities-trim' of github.com:xwiki-labs/cryptpad into communities-trim 11 months ago
yflory 5d77857012 Merge branch 'staging' into communities-trim 11 months ago
yflory 753fdbdc12 Display error for pad history trim 11 months ago
yflory 8d01711244 Disable trim history if nothing to remove 11 months ago
yflory b8ee120b49 lint compliance 11 months ago
yflory 9c6ab86e4d Fix syntax error 11 months ago
yflory fd095644a2 Merge branch 'staging' into communities-trim 11 months ago
yflory 64c0f3a90d Add missing RPC command 11 months ago
yflory d8199b8274 Update UI for trim history 11 months ago
yflory 9a857ea058 Merge branch 'staging' into communities-trim 11 months ago
yflory b96079cd44 Remove hidden hash XXX 11 months ago
ansuz b4172676f3 lint compliance 11 months ago
ansuz 3e06c4bfe3 resolve conflict and lint compliance 11 months ago
ansuz 3ab83826e6 WIP changelog 11 months ago
ansuz 342aea705a note about a history-keeper anti-pattern 11 months ago
ansuz f17d14fd99 simpler limit updates 11 months ago
ansuz bde17a62a1 continue refactoring rpc 11 months ago
ansuz d17e180420 add a short delay in the rpc test to give the server time to write metadata to the disk before checking it 11 months ago
ansuz fa525817ba fix broken removePins RPC 11 months ago
ansuz a00a9fd018 don't hang when reading unparseable lines from history 11 months ago
ansuz 14a67b5600 fix broken removePins RPC 11 months ago
ansuz b7ca39715d don't hang when reading unparseable lines from history 11 months ago
ansuz 9395a1ef1a always write lines with a newline 11 months ago
yflory 1d56c08700 Merge branch 'staging' into communities-trim 11 months ago
ansuz 3741cbcbc0 rewrite trimmed history WITH newlines 11 months ago
yflory 4079f1bfdb Fix RPC command 11 months ago
yflory 6655f493e0 Merge branch 'staging' into communities-trim 11 months ago
yflory e6709f03aa Add trim history RPC command 11 months ago
yflory 6db7fbac0f lint 11 months ago
ansuz 5808c534b5 fix function signature for trimHistory RPC 11 months ago
yflory c9676e1c91 Merge branch 'staging' into communities-trim 11 months ago
ansuz 49b082c032 Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 11 months ago
ansuz 9c28941f6c lint compliance 11 months ago
yflory 1c013d8a4f Fix stacking disconnection alerts 11 months ago
yflory 768dc71803 Fix remote changes in codemirror hijacking cursor 11 months ago
yflory a973f907e5 Merge branch 'cornerPopup' into staging 11 months ago
yflory 78795a3b4d Login or register in profile for anonymous users 11 months ago
David Benqué 2cf578c790 Merge remote-tracking branch 'origin/staging' into staging 11 months ago
David Benqué 443fb6e22d use variable for password input height 11 months ago
ansuz f94713ceca update package-lock to use latest chainpad-server 11 months ago
yflory 2ee38ccc42 lint compliance 11 months ago
yflory 653d58433e Add link to profile in notifications 11 months ago
yflory 5cb266838a Fix syntax error 11 months ago
ansuz 0d69620687 tweak the metadata line handler to handle an edge case in trim history 11 months ago
ansuz 6ae6445d95 fix merge conflict 11 months ago
ansuz f8f3a48e8b use latest chainpad-server 11 months ago
yflory d065a3d116 Add transparency 11 months ago
yflory 5c9b387b6b Update secondary danger button colors 11 months ago
yflory 2e631a8b5f New UI tool: confirm button 11 months ago
yflory 689ea40a92 Compute history size in the worker 11 months ago
yflory d736f783e7 Add txid in GET_HISTORY 11 months ago
yflory a9f8402110 Fix clear history in contacts 11 months ago
ansuz 46dfa026f0 fix an API change that caused a typeError 11 months ago
ansuz 88be40ede3 standardize some function signatures and factor out a lot of boilerplate 11 months ago
ansuz d1c6e67d17 throw if you try to mkAsync a non-function 11 months ago
ansuz 65ba85d97b clear historyKeeper cache when we trim a channel 11 months ago
ansuz 43307ffb1a define all server intervals in a map so we can easily clear them all 11 months ago
ansuz 3601bd6429 leave an XXX note to make sure we fix this typeError 11 months ago
ansuz 6523974ca2 fix a WRITE_PRIVATE_MESSAGE rpc regression 11 months ago
ansuz 4e71b63331 Merge branch 'consolidated-api' into trim-history 11 months ago
ansuz 24b4931816 Merge branch 'staging' of github.com:xwiki-labs/cryptpad into trim-history 11 months ago
ansuz e3269df7f0 fix some critical errors in the trim history storage api 11 months ago
ansuz 779e817443 stop relying on netflux-server internals 11 months ago
yflory c58a50081b lint compliance 11 months ago
yflory 4ac2f64726 Add history trim in the properties modal 11 months ago
ansuz 06c29ef1d1 latest api changes to match the netflux-server refactor 11 months ago
yflory 4680de12ee New UI for the corner popup 11 months ago
yflory 5ead391706 Add comments and improve trim history warnings 11 months ago
yflory 53f19920f0 History trimming in settings 11 months ago
yflory b28f262c86 History trimming placeholder 11 months ago
yflory 19c329f6e7 Trim history option in settings (UI only) 11 months ago
yflory 214d4e8e42 Remove deprecated option in settings 11 months ago
yflory cfa4c6e7bb Fix type error in mailbox 11 months ago
yflory 4e7bb57129 Fix debug app 11 months ago
yflory 5673aa687c Unlock onlyoffice's cells sooner 11 months ago
yflory 2060223edb Remove tippy in TOC 11 months ago
yflory 5e463258eb Test onlyoffice lock 11 months ago
yflory 9345747d19 OnlyOffice reconnect: force a page reload if we have pending changes 11 months ago
Weblate 69a08be7f9 Translated using Weblate (German) 11 months ago
Weblate d8f86b646a Translated using Weblate (French) 11 months ago
yflory 29d4da13eb lint compliance 11 months ago
yflory e90bdb8b7e Fix indentation 11 months ago
yflory f253e19575 Merge branch 'communities-hash' into staging 11 months ago
yflory 464eaee49a Restore full hash when safe hash is deleted from the drive 11 months ago
yflory 9961bffd48 Fix XXX in sframe-common-outer 11 months ago
yflory d979131a85 Add XXX 11 months ago
yflory 31f05a591c Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 11 months ago
yflory 0a543ae6ea Enable onlyoffice reconnection 11 months ago
yflory e32398b308 lint compliance 11 months ago
yflory 745bcd7363 Fix function name 11 months ago
Weblate 0008365f74 Update translation files 11 months ago
Weblate a4f99c3114 Translated using Weblate (French) 11 months ago
Weblate 5ec884ad1c Update translation files 11 months ago
David Benqué 808317f8c1 Merge remote-tracking branch 'origin/staging' into staging 11 months ago
David Benqué 4d986cd60f corrections 11 months ago
ansuz 3e3c495e82 Merge pull request #495 from xwiki-labs/differentiate-admin-messages 11 months ago
yflory b84299ff89 Move elements in settings 11 months ago
yflory f0f91b7ab2 Create the folders recursively if a new pad path doesn't exist 11 months ago
yflory 61d937d601 Display error screen when sharing a hidden hash 11 months ago
yflory 6183401a6f Add settings to continue using unsafe links 11 months ago
yflory 0237bb2867 Fix read-only pads 11 months ago
ansuz b922860339 drop usage of historyKeeper.setConfig 11 months ago
ansuz 80c012f34d prepare to merge history keeper and rpc 11 months ago
yflory ea65647d44 lint compliance 11 months ago
yflory 50b897ee2e Hide the hash with autostore popup + fix anon shared folders 11 months ago
yflory 718610b6db Use the hidden hash when opening a pad from the drive 11 months ago
yflory 83c35543b9 Keep the hash in the URL while the pad is loading 11 months ago
yflory 02200ff403 Fix burn after reading not hidden for shared folders 11 months ago
yflory deddc80270 lint compliance 11 months ago
yflory 7a02b074b7 Hidden hash for files 11 months ago
David Benqué cd586b626d unify keys that say 'X is/is not in your contacts' 11 months ago
David Benqué beaea7bb74 change friends to contacts 11 months ago
yflory 7b9f86157e Use version 3 for hidden hashes 11 months ago
yflory 2c1e26cb52 Remove # symbol when no hash 11 months ago
yflory a8e6250576 Hidden hash for shared folders and team invitation 11 months ago
yflory 0ad96e0966 Hide the crypto keys from the hash 11 months ago
Weblate b8ec7178da Translated using Weblate (Italian) 11 months ago
Weblate 27f864128d Translated using Weblate (German) 11 months ago
Weblate 6f6bbaf75e Translated using Weblate (French) 11 months ago
Weblate b6a6249eb4 Translated using Weblate (Finnish) 11 months ago
Weblate 66ef508e0e Translated using Weblate (English) 11 months ago
Weblate 0158ce6804 Translated using Weblate (Catalan) 11 months ago
ansuz b093d3f0d2 WIP massive rpc refactor 1 year ago
ansuz ceb351326c split out some more rpc functionality and fix broken module paths 1 year ago
ansuz c1f222dd6c move metadata commands from rpc to their own module 1 year ago
ansuz bb7e8e4512 move login block functionality into its own rpc module 1 year ago
ansuz c765362744 move more rpc functionality into modules 1 year ago
ansuz c93b39c094 separate more rpc functionality into pinning and core submodules 1 year ago
ansuz 4fd68b672e drop clientside hooks wrappers for authenticated GET_FILE_SIZE 1 year ago
ansuz 6b5118cdc3 add an npm script to lint only server components 1 year ago
David Benqué 9a53b3b9fd style messages 1 year ago
ansuz ba6e3f33bd move admin commands into their own module 1 year ago
ansuz 1ecb61fe85 put an ugly red border on support thread messages from admins 1 year ago
yflory 009bbd69bd Fix import button in onlyoffice 1 year ago
yflory 4a2b0fc114 Allow ooslide and oodoc imports 1 year ago
ansuz 39b0785406 apply custom limits immediately at startup 1 year ago
ansuz 9cdf54aff2 untested implementation of trimHistory 1 year ago
ansuz c388641479 drop support for 'retainData' configuration 1 year ago
ansuz f45de2b52f move some server deps from repo root to lib/ 1 year ago
ansuz c4194117a7 ever so slightly faster direct message handler 1 year ago
ansuz c146125283 sketch out a method for graceful shutdown 1 year ago
ansuz 873a7c7c84 remove some flow annotations 1 year ago
ansuz 88dcadcb1b sketch out trimHistory interface 1 year ago
ansuz 47290fca1e leave some notes about something that was tricky to read 1 year ago
ansuz 75f1f8c40b refactor some methods that will be used 1 year ago
ansuz b585dd998d throw in a little asynchrony 1 year ago
ansuz a0f1680e85 Merge branch 'staging' into trim-history 1 year ago
yflory dd94091fae New OO build, delete unused files 1 year ago
yflory 0f085e22dc Update inner.js to use the new OO fixes 1 year ago
yflory e3f5c89333 Remove window.location.hash and window.location.href from common-hash 1 year ago
ansuz 99beeca8fd update changelog for Kouprey release (3.10.0) 1 year ago
yflory a9b7b51547 Check if fileType is an array 1 year ago
yflory cdbe4ed8eb Only allow image upload in onlyoffice 1 year ago
yflory 6fd079bfb0 Clean images in onlyoffice 1 year ago
yflory ad94e97444 Fix missing callback 1 year ago
yflory 1067389e37 lint 1 year ago
yflory 7b026868fd Fix images in onlyoffice 1 year ago
yflory b76d308db8 Move new onlyoffice code 1 year ago
ansuz 0ad314c1ce Merge branch 'soon' into staging 1 year ago
yflory af8bda0755 Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 1 year ago
yflory 0fd711aebc Remove existing style in the default empty spreadsheet 1 year ago
yflory ad8bf87c3f Ability to force the version of onlyoffice 1 year ago
Weblate 16744cd140 Translated using Weblate (German) 1 year ago
ansuz 189dc0db71 Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 1 year ago
ansuz 95a2742b7a add an example systemd service file 1 year ago
ansuz 47d9c4daeb update onlyoffice detection to support sheet migration 1 year ago
yflory 746b382166 Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 1 year ago
yflory 47e3cffde7 Fix race condition onlyoffice 1 year ago
David Benqué 7e4ceeb192 Merge remote-tracking branch 'origin/staging' into staging 1 year ago
David Benqué 7dd1602060 lint 1 year ago
Weblate 03ad3218de Translated using Weblate (German) 1 year ago
Weblate f3867c9d74 Translated using Weblate (French) 1 year ago
Weblate 2cdfbe7104 Translated using Weblate (English) 1 year ago
yflory a6425fd38e Update jshintignore 1 year ago
yflory 0f41dddf92 Fix OnlyOffice UI in readonly mode 1 year ago
yflory ddb3738574 Add translation keys 1 year ago
yflory 1d67e421d0 temp 1 year ago
yflory a7a91fb0a5 Add back old OO 1 year ago
ansuz 671999c600 treat onlyoffice iframes specially with regard to CSP 1 year ago
David Benqué a5265e4c61 fix margin on OO export format alert 1 year ago
yflory 0ece81afff Merge branch 'communities-oo' into staging 1 year ago
Weblate d17c115a23 Translated using Weblate (French) 1 year ago
Weblate 952708113c Translated using Weblate (English) 1 year ago
yflory 404c2a1467 Prevent importing xlsx with non-supported browsers 1 year ago
yflory 4a4146f39d Fix burn after reading with onlyoffice 1 year ago
David Benqué 003a616c81 remove XXXs 1 year ago
ansuz 698c2f946e update footer and package.json version to 3.10.0 (Kouprey) 1 year ago
ansuz 10cdd5c76b Merge remote-tracking branch 'origin/communities-oo' into staging 1 year ago
ansuz a19cebd42f Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 1 year ago
ansuz e6d5b0886e evict unpinned blobs and blob proofs 1 year ago
Weblate 611aa0be25 Translated using Weblate (German) 1 year ago
Weblate d4a5f194ee Translated using Weblate (Finnish) 1 year ago
yflory dbb726e4ce Pin images included in the spreadsheets 1 year ago
David Benqué f189ba8822 fix translation key for access warning 1 year ago
yflory 7f4dbd3245 Fix text art in spreadsheets 1 year ago
yflory 52e9d2c87a Change Sheet app colors 1 year ago
yflory a56127daf4 lint compliance 1 year ago
yflory 079071bd87 Merge branch 'staging' into communities-oo 1 year ago
yflory 6d216ba1c2 Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 1 year ago
yflory e8b905282a Add config option to link to an imprint 1 year ago
yflory a7ffc038d9 Improve import/export office documents 1 year ago
yflory 5a26be5531 Improve import and export UI. Fix race condition with office import. 1 year ago
yflory bc8fee8a29 Merge branch 'staging' into communities-oo 1 year ago
David Benqué 0bcb89310c remove // XXXs for translation keys 1 year ago
Weblate bca9d223bb Translated using Weblate (Spanish) 1 year ago
Weblate 6a64dd56b8 Translated using Weblate (Italian) 1 year ago
Weblate 12bfea64e2 Translated using Weblate (German) 1 year ago
Weblate 3a66298b3e Translated using Weblate (French) 1 year ago
Weblate d92b134a94 Translated using Weblate (Finnish) 1 year ago
Weblate 4a25298648 Translated using Weblate (English) 1 year ago
Weblate f07a5bc0cd Translated using Weblate (Catalan) 1 year ago
ansuz 50ea551b36 Merge branch 'staging' into trim-history 1 year ago
ansuz 66c857d61e tiny optimization which saves a little bit of memory usage for a little bit of time 1 year ago
ansuz 9de073c269 finally get around to reorganizing the messiest part of history keeper 1 year ago
ansuz 8c5c643a25 finally get around to reorganizing the messiest part of history keeper 1 year ago
ansuz 4418f6a113 tiny optimization which saves a little bit of memory usage for a little bit of time 1 year ago
David Benqué d1b8d8668e linting 1 year ago
yflory cbe407084d Remove XXX 1 year ago
yflory cb6f0513a2 Fix extension selector in pad export 1 year ago
yflory 47768112b4 Merge branch 'staging' into communities-oo 1 year ago
yflory b80d3cf24f Ability to provide a dropdown container to a text input. Applied to framework export. 1 year ago
yflory 9db30aff1f Improve UI 1 year ago
yflory 663fa9b474 Fix race condition with images 1 year ago
yflory 50a039b40b Hide unused tabs 1 year ago
yflory b14f6e4030 Fix image upload 1 year ago
yflory 9abeada1f0 Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 1 year ago
ansuz 6a52b97cd8 Merge branch 'staging' into trim-history 1 year ago
ansuz ff40538ee7 sketch our two new RPCs for trimming history 1 year ago
ansuz 15ca855f22 start using the scheduler for all relevant database methods 1 year ago
ansuz 7072fe4fa4 implement and test a complex constrained scheduler 1 year ago
yflory 9e1f49c177 Enable spreadsheets for anonymous users 1 year ago
yflory aa27eabff1 lint compliance 1 year ago
yflory a1a2e6659e retab + jshint 1 year ago
yflory 4e537e6ddc retab 1 year ago
David Benqué 33fe090dcf Merge branch 'bar' into staging 1 year ago
David Benqué 332f9e37aa Merge remote-tracking branch 'origin/bar' into bar 1 year ago
David Benqué 9e9a2dafaa fix warning paragraph spacing 1 year ago
yflory 884350f8f7 Merge branch 'staging' of github.com:xwiki-labs/cryptpad into staging 1 year ago
yflory bd90df9d1e Merge branch 'bar' into staging 1 year ago
yflory e5865f8653 Merge branch 'bar' of github.com:xwiki-labs/cryptpad into bar 1 year ago
yflory d02092eb76 Fix cache and storage issues in share and filepicker iframes 1 year ago
yflory 73af078e4a Hide share modal when pad is deleted 1 year ago
ansuz 47fdf9de9f add a note to fix some odd behaviour 1 year ago
ansuz c89595c7bb handle all the simple cases where operations on channels should be queued 1 year ago
ansuz 10eed5c46d drop unmaintained flow annotations 1 year ago
David Benqué df03ddcca0 temporary translation keys 1 year ago
yflory 1e6e9fd288 Remove deleted pad from the drive 1 year ago
yflory 09da8ac6a2 Warn the owners when deleting a BAR pad 1 year ago
David Benqué ba43bb9f07 adjust font size of password 1 year ago
David Benqué e3aa814c0a style of loading screen messages and password form 1 year ago
David Benqué ddb204def4 Change temporary text 1 year ago
ansuz b5b3e99e56 use correct function signature for Pinned.load 1 year ago
yflory 9f8f6399d8 Add finnish translation 1 year ago
yflory bdd338902b Hide radio buttons in share modal when no contacts 1 year ago
David Benqué f1d1690cf8 access rights buttons 1 year ago
David Benqué 7042b9c2d7 style loading screen message 1 year ago
David Benqué 5ee12f8da7 hard coded keys for testing 1 year ago
yflory a3b3a9e4fb Fix button not updating when generating a BAR url 1 year ago
yflory 1bf48a5a8c lint compliance 1 year ago
yflory 9ee9e46087 Receiving a burn after reading URL 1 year ago
yflory 96a00f89df Generate burn after reading link for pads 1 year ago
yflory 14905a5693 Support ownerKey in file hash 1 year ago
yflory d4d07f3332 Add support for ownerKey in the hash (version 1 and 2) 1 year ago
yflory 8ccafbc821 Fix tests for invite url 1 year ago
Ludovic Dubost d593f3961c Updated OnlyOffice editors, Updated x2t.js to 5.4.2 1 year ago
Ludovic Dubost aac353c5fc Support importing images for OnlyOffice documents 1 year ago
Ludovic Dubost 453082c080 Modified implementation of image support without change to filename 1 year ago
Ludovic Dubost 1065ef5d4c Fix security policy for ooslide and oodoc which would break x2t wasm export on Chrome 1 year ago
Ludovic Dubost 4cfa391a17 Fixes in onlyoffice code for image upload realtime and ooslide 1 year ago
Ludovic Dubost 1c19e6f2b5 Fixed callback in case of error on image fetching 1 year ago
Ludovic Dubost 9b2e4816c8 Added support for exporting images 1 year ago
Ludovic Dubost d9fb248504 Fixes for image support 1 year ago
Ludovic Dubost 6155284c4d Merged initial patches for image upload, partially working 1 year ago
Ludovic Dubost cf06614265 Added readme to x2t.js code 1 year ago
Ludovic Dubost 1123928942 Updated x2t library. Added support for Excel2007 1 year ago
Ludovic Dubost 2ef61371f4 Added support for ods 1 year ago
Ludovic Dubost 36926e11c9 Support ods import 1 year ago
Ludovic Dubost 7026123bfc Implementing Spreadsheet XLSX import/export using WebAssembly 1 year ago
14355 changed files with 2013575 additions and 1735858 deletions
  1. 45 .github/ISSUE_TEMPLATE/bug_report.md
  2. 2 .gitignore
  3. 5 .jshintignore
  4. 346 CHANGELOG.md
  5. 38 Jenkinsfile
  6. 4 bower.json
  7. 347 config/config.example.js
  8. 2 customize.dist/ckeditor-config.js
  9. 69 customize.dist/loading.js
  10. 10 customize.dist/login.js
  11. 3 customize.dist/messages.js
  12. 8 customize.dist/pages.js
  13. 53 customize.dist/src/less2/include/alertify.less
  14. 52 customize.dist/src/less2/include/buttons.less
  15. 4 customize.dist/src/less2/include/colortheme.less
  16. 113 customize.dist/src/less2/include/corner.less
  17. 3 customize.dist/src/less2/include/dropdown.less
  18. 7 customize.dist/src/less2/include/fileupload.less
  19. 49 customize.dist/src/less2/include/markdown.less
  20. 17 customize.dist/src/less2/include/modal.less
  21. 120 customize.dist/src/less2/include/modals-ui-elements.less
  22. 9 customize.dist/src/less2/include/notifications.less
  23. 26 customize.dist/src/less2/include/sidebar-layout.less
  24. 13 customize.dist/src/less2/include/tokenfield.less
  25. 18 customize.dist/src/less2/include/toolbar.less
  26. 23 customize.dist/src/less2/include/usergrid.less
  27. 1 customize.dist/src/less2/include/variables.less
  28. 2 customize.dist/src/outer.css
  29. 14 customize.dist/translations/messages.hi.js
  30. 14 customize.dist/translations/messages.sv.js
  31. 28 docs/cryptpad.service
  32. 11 docs/example.nginx.conf
  33. 1011 historyKeeper.js
  34. 65 import
  35. 35 lib/api.js
  36. 197 lib/commands/admin-rpc.js
  37. 172 lib/commands/block.js
  38. 275 lib/commands/channel.js
  39. 149 lib/commands/core.js
  40. 189 lib/commands/metadata.js
  41. 298 lib/commands/pin-rpc.js
  42. 107 lib/commands/quota.js
  43. 89 lib/commands/upload.js
  44. 11 lib/deduplicate.js
  45. 86 lib/defaults.js
  46. 285 lib/historyKeeper.js
  47. 910 lib/hk-util.js
  48. 26 lib/load-config.js
  49. 13 lib/log.js
  50. 217 lib/metadata.js
  51. 7 lib/once.js
  52. 185 lib/pins.js
  53. 235 lib/plan.js
  54. 216 lib/rpc.js
  55. 172 lib/schedule.js
  56. 628 lib/storage/blob.js
  57. 1260 lib/storage/file.js
  58. 397 lib/storage/tasks.js
  59. 84 lib/stream-file.js
  60. 576 lib/workers/db-worker.js
  61. 367 lib/workers/index.js
  62. 527 package-lock.json
  63. 10 package.json
  64. 1766 rpc.js
  65. 3 scripts/check-account-deletion.js
  66. 42 scripts/compare-pin-methods.js
  67. 8 scripts/diagnose-archive-conflicts.js
  68. 90 scripts/evict-inactive.js
  69. 4 scripts/expire-channels.js
  70. 4 scripts/migrations/migrate-tasks-v1.js
  71. 5 scripts/restore-archived.js
  72. 235 scripts/tests/test-mailbox.js
  73. 46 scripts/tests/test-pins.js
  74. 41 scripts/tests/test-plan.js
  75. 183 scripts/tests/test-rpc.js
  76. 220 scripts/tests/test-scheduler.js
  77. 295 server.js
  78. 59 storage/README.md
  79. 628 storage/blob.js
  80. 1053 storage/file.js
  81. 413 storage/tasks.js
  82. 24 www/admin/app-admin.less
  83. 2 www/admin/index.html
  84. 29 www/admin/inner.js
  85. 42 www/assert/main.js
  86. 41 www/auth/main.js
  87. 33 www/code/app-code.less
  88. 2 www/code/index.html
  89. 117 www/code/inner.js
  90. 750 www/code/markers.js
  91. 4 www/code/mermaid-new.css
  92. 59134 www/code/mermaid.js
  93. 56 www/code/mermaid.min.js
  94. 9 www/common/application_config_internal.js
  95. 176 www/common/common-hash.js
  96. 332 www/common/common-interface.js
  97. 12 www/common/common-messaging.js
  98. 1916 www/common/common-ui-elements.js
  99. 74 www/common/common-util.js
  100. 273 www/common/cryptpad-common.js

45 .github/ISSUE_TEMPLATE/bug_report.md

@@ -0,0 +1,45 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''
---
**Describe the bug**
A clear and concise description of what the bug is.
**Where did it happen?**
Did the issue occur on CryptPad.fr or an instance hosted by a third-party?
If on another instance, please provide its full URL.
**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Browser (please complete the following information):**
- OS: [e.g. iOS]
- Browser [e.g. firefox, tor browser, chrome, safari, brave, edge, ???]
- variations [e.g. Firefox nightly, Firefox ESR, Chromium, Ungoogled chrome]
- Version [e.g. 22]
- Extensions installed [e.g. UBlock Origin, Passbolt, LibreJS]
- Browser tweaks [e.g. firefox "Enhanced Tracking Protection" strict/custom mode, tor browser "safer" security level, chrome incognito mode]
**Smartphone (please complete the following information):**
- Device: [e.g. iPhone6]
- OS: [e.g. iOS8.1]
- Browser [e.g. stock browser, safari]
- Version [e.g. 22]
**Additional context**
Add any other context about the problem here.

2 .gitignore

@@ -20,4 +20,4 @@ block/
logs/
privileged.conf
config/config.js
*yolo.sh

5 .jshintignore

@@ -7,6 +7,9 @@ www/common/highlight/
www/common/jquery-ui/
www/common/onlyoffice/sdkjs
www/common/onlyoffice/web-apps
www/common/onlyoffice/x2t
www/common/onlyoffice/v1
www/common/onlyoffice/v2*
server.js
www/common/old-media-tag.js
@@ -31,4 +34,4 @@ customize/
www/debug/chainpad.dist.js
www/pad/mathjax/
www/code/mermaid.js
www/code/mermaid*.js

346 CHANGELOG.md

@ -1,3 +1,349 @@
# Quagga release (3.16.0)
## Goals
We've continued to keep a close eye on server performance since our last release while making minimal changes. Our goal for this release has been to improve server scalability further while also addressing user needs with updates to our client code.
We were pleasantly surprised to receive a pull request implementing a basic version of [author colors](https://github.com/xwiki-labs/cryptpad/issues/41) in our code editor. Since it was nearly ready to go we set some time aside to polish it up a little bit to include it in this release.
## Update notes
We've updated the example nginx config to include an `Access-Control-Allow-Origin` header that was previously missing. We've also added a new configuration point in response to [this issue](https://github.com/xwiki-labs/cryptpad/issues/529) about the server's child processes using too many threads. Administrators may now set a maximum number of child processes via `config.js` using `maxWorkers: <number of child processes>`. We recommend using one less than the number of available cores, though one worker should be sufficient as long as your server is not under heavy load.
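For administrators following along, here is a minimal sketch of what the new option looks like in `config/config.js`; the value is only an example, and everything else in a real config should follow `config/config.example.js`.

```js
// config/config.js -- minimal sketch; only maxWorkers is discussed above,
// and the number shown is an example for a 4-core machine.
module.exports = {
    // cap the number of database/crypto worker child processes;
    // one less than the number of available cores is the recommendation above
    maxWorkers: 3,
};
```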
As usual, updating from the previous release can be accomplished by:
1. stopping your server
2. pulling the latest code with git
3. installing clientside dependencies with `bower update`
4. installing serverside dependencies with `npm i`
5. restarting your server
## Features
* As mentioned above, we've built upon a very helpful [PR](https://github.com/xwiki-labs/cryptpad/pull/522) from members of the Piratenpartei (German Pirate Party) to introduce author colors in our code editor. It's still experimental, but registered users can enable it on pads that they own via the "Author colors" entry in the `...` menu found beneath their user admin menu.
* Serverside performance optimizations
  * Automatically expiring pads work by creating a task to be run at the target date. This process involves a little bit of hashing, so we've changed it to be run in the worker.
  * The act of deleting a file from the server actually moves it to an archive which is not publicly accessible. These archived files are regularly cleaned up if you run `scripts/evict-inactive.js`. Unfortunately, moving files is more expensive than deletion, so we've noticed spikes in CPU usage when users delete many files at once (like when emptying the trash from their drive). To avoid such spikes while the server is already under load we've implemented per-user queues for deletion (see the sketch after this list).
  * We've also noticed that when we restart our server while it is under heavy load some queries can time out due to many users requesting history at once. We've implemented another queue to delegate tasks to workers in the order that they are received. We need to observe how this system performs in practice, so there might be small tweaks as we get more data.
  * As noted above, we've made the number of workers configurable. At the same time we unified two types of workers into one, cutting the number of workers in half.
* We've added a new admin RPC call to request some information about the server's memory usage to help us debug what seems to be a small memory leak.
* Most of our editors were previously loaded with two more iframes on the page in addition to our main sandboxed iframe. These separate frames ensure that encryption keys are not exposed to the same iframe responsible for displaying the rest of CryptPad's UI. One was responsible for loading the "filepicker" for inserting media into your documents, the other was responsible for handling encryption keys for the share modal. Since we wanted to add two new functions using iframes in the same manner we took the opportunity to come up with a generic solution using only one iframe for these separate modals, since they all have the same level of privilege to the sensitive data we're trying to protect.
* Our mermaidjs integration has been customized to be a little easier on the eyes. We focused in particular on GANTT charts, though other charts should be more appealing as well, especially in the new "lightbox" UI introduced in our last release.
* We now prompt unregistered users to register or log in when they use the spreadsheet editor. For context, unregistered users don't benefit from all of the same features as registered users, and this makes a few performance optimizations impossible.
* Finally, we've continued to receive translations from contributors in Catalan, German, and Dutch.
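The per-user deletion queues mentioned in the list above boil down to a small pattern: each user gets a FIFO of archival jobs that run one at a time. The sketch below is illustrative only; the names (`enqueue`, `archiveChannel`) are assumptions, not CryptPad's actual API.

```js
// A per-user job queue: deletions (really archivals) for a given user run
// one after another instead of all at once, smoothing out CPU spikes.
const queues = new Map(); // userId -> array of pending jobs

const enqueue = (userId, job /* function (done) { ... } */) => {
    const queue = queues.get(userId) || [];
    queue.push(job);
    queues.set(userId, queue);
    if (queue.length > 1) { return; } // a job is already running for this user

    const next = () => {
        const current = queue[0];
        if (!current) { queues.delete(userId); return; }
        current(() => { // run the job, advance when it calls back
            queue.shift();
            next();
        });
    };
    next();
};

// hypothetical usage: archiving one channel counts as one job
// enqueue(userId, function (done) { archiveChannel(channelId, done); });
```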
## Bug fixes
* We noticed that under certain conditions clients were sending metadata queries to the server for documents that don't have metadata. We've implemented some stricter checks to prevent these useless queries.
* We've implemented a temporary fix for our rich text editor to solve [this issue](https://github.com/xwiki-labs/cryptpad/issues/526) related to conflicting font-size and header styles.
* We also accepted [this PR](https://github.com/xwiki-labs/cryptpad/pull/525) to tolerate server configurations specifying a `defaultStorageLimit` of 0.
* Finally, we noticed that embedded media occasionally stopped responding correctly to right-click events due to a problem with our in-memory cache. It has since been fixed.
# PigFootedBandicoot release (3.15.0)
## Goals
Our plan for this release was to allow our server's code to stabilize after a prolonged period of major changes. The massive surge of new users on cryptpad.fr forced us to change our plans and focus instead on increasing the performance and scalability of our serverside code and its supporting infrastructure. Most of this release's changes have been thoroughly tested as they've been deployed to our instance on an ongoing basis; however, we're still looking forward to stabilizing as planned.
We also ended up making significant improvements to our clientside code, since the increased load on the server seemed to exacerbate a few race conditions which occurred less frequently under the previous circumstances.
## Update notes
Updating from version 3.14.0 should follow the usual process:
1. stop your server
2. fetch the latest code with git
3. install clientside dependencies with `bower update`
4. install serverside dependencies with `npm i`
5. start your server
You may notice that the server now launches a number of child processes named `crypto-worker.js` and `db-worker.js`. These worker processes make use of however many cores your server has available to perform more CPU-intensive tasks in parallel.
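As a rough illustration of that architecture, the sketch below forks one worker per core and delegates CPU-bound signature checks to them. The worker file name comes from the note above, but the message format, `txid` matching, and round-robin dispatch are assumptions made for the sake of the example; CryptPad's actual worker code lives under `lib/workers/` in this diff.

```js
// fork one child process per core and dispatch signature checks to them
const { fork } = require('child_process');
const OS = require('os');

const workers = OS.cpus().map(() => fork('./crypto-worker.js')); // path assumed

// match responses to requests with a transaction id (message shape assumed)
const pending = {};
let txid = 0;
workers.forEach((worker) => {
    worker.on('message', (res) => { // res: { txid, error, valid }
        const cb = pending[res.txid];
        delete pending[res.txid];
        if (cb) { cb(res.error, res.valid); }
    });
});

let next = 0;
const checkSignature = (msg, sig, publicKey, cb) => {
    const id = txid++;
    pending[id] = cb;
    // round-robin: validation is CPU-bound, so it runs off the main event loop
    workers[next++ % workers.length].send({ txid: id, msg, sig, publicKey });
};
```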
## Features
* As noted above, the server now uses a multi-process architecture and parallelizes more routines. This improvement will be most noticeable when the server is run on ARM processors, which validate cryptographic signatures particularly slowly.
* The admin panel available to instance administrators now displays a list of "Open files". We added this to help us diagnose a "file descriptor leak" which will be described in the _Bug fixes_ section.
* We received a large number of contributions from translators via our [weblate instance](https://weblate.cryptpad.fr/projects/cryptpad/app/). Most notably, Italian is the fourth language to be fully translated with Finnish and Spanish seemingly in line to take the fifth and sixth spots.
* We've addressed some usability issues in our whiteboard app in response to increased interest. Its canvas now automatically resizes according to the size of your screen and the content you've drawn. Unfortunately, we noticed that the "embed image" functionality was imposing some additional strain on our server, so we decided to implement an admittedly arbitrary limit of 1MB on the size of images embedded in whiteboards. We'll consider removing this restriction when we have time to design a more efficient embedding system.
* We've removed the per-user setting which previously allowed registered users to skip the "pad creation screen" which is displayed before creating a document. This setting has not been the default for some time and was not actively tested, so this "feature" is our way of guaranteeing no future regressions in its behaviour.
* As a part of our effort to improve the server's scalability we evaluated which clientside requests could be sent less often. One such request came from the "usage bar" found in users' drives, teams, and settings pages. Previously it would update every 30 seconds no matter what. Now it only updates if that tab is focused (see the sketch after this list).
* Most actions that an administrator can take with regard to a user's account require the "public key" which is used to identify their account. This key is available on the user's settings page, but many users share their profile URL instead. We've added a button to profile pages which copies the user's public key to the clipboard, so now either page will be sufficient.
* We've updated our [mermaidjs](https://mermaid-js.github.io/mermaid/#/) dependency. For those that don't know, Mermaid is a powerful markup syntax for producing a variety of charts. It's integrated into our code editor. This updated version supports GANTT chart tasks with multiple dependencies, pie charts, and a variety of other useful formats.
* We found that in practice our mermaid charts and other embedded media were sufficiently detailed that they became difficult to read on some screens. In response we've added the ability to view these elements in a "lightbox UI" which is nearly full-screen. This interface can be used to view media contained in the code editor's preview pane, within user and team drives, and in a few other places where Markdown is used.
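As a sketch of the usage-bar change referenced earlier in this list, here is one way to skip the periodic update while a tab is hidden, using the standard Page Visibility API; `fetchQuotaAndRedraw` is a hypothetical placeholder rather than a real CryptPad function.

```js
// only refresh the usage bar when the tab is visible; catch up on return
const UPDATE_EVERY = 30 * 1000; // the 30 second interval mentioned above
let dirty = false;

const updateUsageBar = () => {
    if (document.hidden) { dirty = true; return; } // skip hidden tabs
    dirty = false;
    fetchQuotaAndRedraw(); // hypothetical: send the RPC and redraw the bar
};

setInterval(updateUsageBar, UPDATE_EVERY);
document.addEventListener('visibilitychange', () => {
    if (!document.hidden && dirty) { updateUsageBar(); }
});
```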
## Bug fixes
This release contains fixes for a lot of bugs. We'll provide a brief overview, but in the interest of putting more time towards development we'll simply offer our strong recommendation that you update.
* The server process didn't always close file descriptors that it opened, resulting in an EMFILE error when the system ran out of available file descriptors. Now it closes them.
* The server also kept an unbounded amount of data in an in-memory cache under certain circumstances. Now it doesn't.
* A check intended to ignore the `premiumUploadSize` config value when it was less than `maxUploadSize` incorrectly compared it against `defaultStorageLimit` instead. As a result, premium upload sizes were disabled on our instance when we increased the default storage limit to 1GB. It's fixed now (see the sketch after this list).
* We accepted a [PR](https://github.com/xwiki-labs/cryptpad/pull/513) to prevent a typeError when logging to disk was entirely disabled.
* We identified and fixed the cause of [this issue](https://github.com/xwiki-labs/cryptpad/issues/518) which caused spreadsheets not to load.
* Emojis at the start of users' display names were not displayed correctly in the Kanban's "cursor".
* We (once again) believe we've fixed the [duplicated text bug](https://github.com/xwiki-labs/cryptpad/issues/352). Time will tell.
* Our existing Mermaidjs integration supported the special syntax to make elements clickable, but the resulting links didn't work within CryptPad. We now remove them.
* Rather than having messages time out if they are not received by the server within a certain timeframe we now wait until the client reconnects, at which point we can check whether those messages exist in the document's history. On a related note we now detect when the realtime system is in a bad state and recreate it.
* Finally, we've fixed a variety of errors in spreadsheets.
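To make the `premiumUploadSize` fix above concrete, here is a sketch of the corrected comparison using the configuration names from this changelog; the surrounding function and its signature are assumptions, not CryptPad's actual code.

```js
// ignore premiumUploadSize unless it is actually larger than maxUploadSize;
// the bug was comparing it against defaultStorageLimit instead
const getUploadLimit = (config, isPremiumUser) => {
    const premiumSize = (config.premiumUploadSize > config.maxUploadSize) ?
        config.premiumUploadSize : undefined;
    return (isPremiumUser && premiumSize) || config.maxUploadSize;
};
```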
# OrienteCaveRat release (3.14.0)
## Goals
We planned a one-week release cycle in order to finish up some major features that were already in development during our last release.
In the meantime, the reaction to the COVID-19 pandemic has resulted in a greatly increased load on our servers, so we've begun to focus on improving stability to ensure that we are able to keep up with demand.
## Update notes
We had some trouble during the week of March 9th, 2020, as the CryptPad.fr server started throwing EMFILE errors. This means that it was trying to open new files (for reading or writing) but there were too many files open already. We've added some new code to help debug the issue, but there is not yet a fix in place. The maximum number of open files on our host OS had been increased by several orders of magnitude (several years ago) but we're now aware that the systemd service file that launches the API server does not respect this global limit. As such, we've updated the example service file to indicate how you can update this limit yourself. For an example of how to update this limit at the OS level, see this page: https://docs.oracle.com/cd/E19623-01/820-6168/file-descriptor-requirements.html
Otherwise, updating from 3.13.0 to 3.14.0 is as usual:
1. stop your server
2. fetch the latest source
3. `npm i`
4. `bower update`
5. restart your server
## Features
We're very happy to announce a major update to our kanban application! We've made a lot of changes, but the most notable ones are:
* the ability to add markdown content to your cards and edit it collaboratively in real-time
* tags on cards and the ability to filter cards by tags at the top of the application
* indicators to show if a card is being modified by another user while you are editing it
* the ability to toggle between an 'overview mode' which hides everything but your cards titles and a full mode which shows everything
* vertical scrolling for very tall columns, and horizontal scrolling for columns that don't fit on your screen (instead of reflowing to the next line)
* a smaller palette of pre-chosen colors for cards and boards instead of a color-picker, to make it easier to choose matching colors for tasks
* the ability to drag cards and boards to the trash instead of having to click a small X and confirm their deletion
We've also improved message throughput for our server by splitting cryptographic signature validation into separate processes. On a quad core server this means you should be able to handle (roughly) four times the messages.
## Bug fixes
* Drive:
* a regression in the drive for anonymous users made it impossible to delete contained pads directly from the drive (though deletion from the pad itself was working). It's now back to normal.
* we've updated the translation key referenced in [issue 482](https://github.com/xwiki-labs/cryptpad/issues/482) to clarify what qualifies a pad as "recently modified".
* We noticed (and fixed) another regression that disabled our recently introduced "history trim" functionality.
* We've identified and addressed a few client networking errors that were causing clients to disconnect (and to get stuck in a reconnecting state), but we're still actively looking for more.
* Server:
* we've added some extra checks to try to identify where our file descriptor leak is coming from, we'll release fixes as they become available.
* we've caught a typeError that only ever happened while the server was overwhelmed with EMFILE errors.
* [this PR](https://github.com/xwiki-labs/cryptpad/pull/503) fixed an incorrect conditional expression at launch-time.
* We fixed a bug in our spreadsheet editor that was causing sheets not to load. Sheets affected by this issue should be repaired. We ask that you submit a report ticket on your instance if you encounter a sheet that wasn't fixed.
# NorthernWhiteRhino release (3.13.0)
## Goals
This release cycle we prioritized the completion of "access lists", a major feature that we're excited to introduce.
## Update notes
Nearly every week (sometimes more than once) we end up taking time away from development to help administrators to configure their CryptPad instances. We're happy to see more instances popping up, but ideally we'd like to spend more of our time working on new features. With this in mind we devoted some time to simplify instance configuration and to clarify some points where people commonly have difficulty.
If you review `cryptpad/config.example.js` you'll notice it is significantly smaller than it was last release.
Old configuration files should be backwards compatible (if you copied `config.example.js` to `config.js` in order to customize it).
The example has been reorganized so that the most important parts (which people seemed to miss most of the time) are at the top.
Most of the fields which were defined within the config file now have defaults defined within the server itself.
If you supply these values they will override the default, but for the most part they can be removed.
We advise that you read the comments at the top of the example, in particular the points related to `httpUnsafeOrigin` and `httpSafeOrigin` which are used to protect users' cryptographic keys in the event of a cross-site scripting (XSS) vulnerability.
If these values are not correctly set then your users will not benefit from all the security measures we've spent lots of time implementing.
A lot of the fields that were present as modifiable defaults have been removed or commented out in the example config.
If you supply them then they will override the default behaviour, however, you probably won't need to and doing so might break important functionality.
Content-Security Policy (CSP) definitions should be safe to remove, as should `httpAddress`, `httpPort`, and `httpSafePort` (unless you need to run the nodejs API server on an address other than `localhost` or on a port other than 3000).
Up until now it's been possible for administrators to allow users to pay for accounts (on their server) via https://accounts.cryptpad.fr.
Our intent was to securely handle payment and then split the proceeds between ourselves and the instance's administrator.
In practice this just created extra work for us because we ended up having to contact admins, all of whom have opted to treat the subscription as a donation to support development.
As such we have disabled the ability of users to pay for premium subscriptions (on https://accounts.cryptpad.fr) for any instance other than our own.
Servers with premium subscriptions enabled were configured to check whether anyone had subscribed to a premium account by querying our accounts server on a daily basis.
We've left this daily check in place despite premium subscriptions being disabled because it informs us how many third-party instances exist and what versions they are running.
We don't sell or share this information with anyone, but it is useful to us because it informs us what older data structures we have to continue to support.
For instance, we retain code for migrating documents to newer data formats as long as we know that there are still instances that have not run those migrations.
We also cite the number of third-party instances when applying for grants as an indicator of the value of funding our project.
In any case, you can disable this daily check-in by setting `blockDailyCheck` to `true` in `config/config.js`.
Finally, we've implemented the ability to set a higher limit on the maximum size of uploaded files for premium users (paying users on CryptPad.fr and users with entries in `customLimits` on other instances).
Set this limit as a number (of bytes) with `premiumUploadSize` in your config file.
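For reference, a hedged example of how those two options might look in `config/config.js`; both are optional and the values shown are only examples.

```js
// config/config.js -- illustrative values only
module.exports = {
    // opt out of the daily check-in against our accounts server
    blockDailyCheck: true,
    // raise the upload cap for premium / customLimits users to 150MB (in bytes)
    premiumUploadSize: 150 * 1024 * 1024,
};
```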
## Features
* It is often difficult to fix problems reported as GitHub issues because we don't have enough information. The platform's repository now includes an _issue template_ which includes a list of details that will probably be relevant to fixing bugs. Please read the list carefully, as we'll probably just close issues if information that we need was not included.
* We've made it easy to terminate all open sessions for your account. If you're logged in, you'll now see a _log out everywhere_ button in the _user admin menu_ (in the top-right corner of the screen).
* You may still terminate only _remote sessions_ while leaving your local session intact via the pre-existing button on the settings page's _confidentiality_ tab.
* You may have noticed that it takes progressively longer to load your account as you add more files to your drive, shared folders, and teams. This is because an integrity check is run on all your files when you first launch a CryptPad session. We optimized some parts of this check to speed it up. We plan to continue searching for similar processes that we can optimize in order to decrease loading time and improve run-time efficiency.
* Lastly, this release introduces **access lists**, which you can use to limit who can view your documents _even if they have the keys required to decrypt them_. You can do so by using the _Access_ modal for any given document, available in the `...` dropdown menu in each app's toolbar or when right-clicking in the drive.
* Enabling access restriction for a document will disallow anyone except its owners or allowed users from opening it. Anyone else who is currently editing or viewing the document will be disconnected from the session.
## Bug fixes
* A member of _C3Wien_ reported some strange behaviour triggered by customizing some of Firefox's anti-tracking features. The settings incorrectly identified our cross-domain sandboxing system as a tracker and interfered with its normal functionality. As a result, the user was treated as though they were not logged in, even though pads from their account's drive were displayed within the "anonymous drive" that unregistered users normally see.
* This was simple to fix, requiring only that we adjust our method of checking whether a user is logged in.
* If you ever notice odd behaviour we do recommend that you review any customizations you've made to your browser, as we only test CryptPad under default conditions unless prompted to investigate an issue.
* Users that take advantage of the Mermaid renderer in our markdown editor's preview pane may have noticed that the preview's scroll position was lost whenever mermaid charts were modified. We've updated our renderer such that it preserves scroll position when redrawing elements, making it easier to see the effects of your changes when editing large charts.
# Megaloceros release (3.12.0)
## Goals
As of our last release our 'history trim' functionality was almost ready to go. We took this release period to do some extensive testing and to prepare the 'allow list' functionality which will be included in our next release.
In the meantime, we also aimed to improve performance, add a few small but nice features, and fix a number of bugs.
## Update notes
This release includes updates to:
1. the server and its dependencies
2. the example nginx configuration which we recommend for production installations
3. the client code and its dependencies
Our ability to debug CryptPad's usage of shared workers (on the client) has been complicated by the fact that Firefox's shared worker debugging panel was not working for our instance. We finally traced the problem back to a Content-Security Policy setting in our configuration file. The issue can be addressed by adding a `resource:` entry in the `connect-src` header. We've updated the example nginx config to reflect this. You can deploy this version of CryptPad without this modification, but without it our ability to debug and fix issues related to shared workers will be extremely limited.
Otherwise, updating from CryptPad v3.11.0 is pretty much the same as normal:
1. stop your server
2. pull the latest code via git
3. `npm i` to get the latest server dependencies
4. `bower update` to get the latest client dependencies
5. restart your server
## Features
* The CryptPad server stores documents as a series of encrypted changes to a blank document. We have mechanisms in place that make it so clients only need the most recent changes to view the document, but the storage requirements on the server would only ever grow unless you deleted the entire document. As of this release, owners of a document have the option to remove that unnecessary history. To do so: right-click a pad in a drive or shared folder and choose the properties option in the menu. The bottom of the properties popup will display the document's size. If there is any history that is eligible for removal, a button will be displayed to remove it.
* This option is only available for the pad's owners. If it has no owners then it will not be possible to remove its history.
* It is not yet possible to trim the history of spreadsheets, as they are based on a different system than the rest of our documents and it will take some additional work to add this functionality.
* We've also added the ability to easily make copies of documents from your drive. Right-click on documents and select "make a copy" from the menu.
* This feature doesn't work for files. Files can't be modified anyway, so there's little value in making copies.
* We haven't added the ability to make a copy of a spreadsheet yet for the same reasons as above.
* We've improved the way our markdown renderer handles links to better support a variety of types of URLs:
* anchors, like `[bug fixes](#bug-fixes)`
* relative paths, like `[cryptpad home page](/index.html)` or `[a rich text pad](/pad/#/pad/view/12151241241254123412451231231221)`
* absolute URLs without the protocol, like `[the CryptPad repository](//github.com/xwiki-labs/cryptpad)`
* We've optimized a background process that iterates over a part of the database when you first launch the CryptPad server. It now uses less memory and should incur less load on the CPU when restarting the server. This should allow the server to spend its resources handling clients that are trying to reconnect.
* We've also optimized some client-side code to prioritize loading your drive instead of some other non-essential resources used for notifications. Pages should load faster. We're working on some related improvements to address page load time which we'll introduce on an ongoing basis.
* As noted above, we're finally able to debug shared workers in Firefox. We're investigating a few issues that were blocked by this limitation, and we hope to include a number of bug fixes in upcoming releases.
* We've continued some ongoing improvements to the instance admin panel and introduced the ability to link directly to a support ticket. The link will only be useful to users who would already be able to open the admin panel.
* The code responsible for fetching and scanning the older history of a document has also been optimized to avoid handling messages for channels multiple times.
* Finally, we've received contributions from our German and Italian translators via our weblate instance.
* We're always looking for more help with localization. You can review the status of our translations and contribute to them [here](https://weblate.cryptpad.fr/projects/cryptpad/app/).
## Bug fixes
* After a lot of digging we believe we've identified and fixed a case of automatic text duplication in our rich text editor. We plan to wait a little longer and see if [reports of the incorrect behaviour](https://github.com/xwiki-labs/cryptpad/issues/352) really do stop, but we're optimistic that this problem has been solved.
* [Another GitHub issue](https://github.com/xwiki-labs/cryptpad/issues/497) related to upgrading access for team members has been fixed. If you continue to have issues with permissions for team members, we recommend having the team owner demote the affected users to viewers before promoting them to the desired access level.
* We've fixed a number of small issues in our server:
* The server did not correctly respond to unsupported commands for its SET_METADATA RPC. Instead of responding with an error it ignored the message. In practice this should not have affected any users, since our client only uses supported commands.
* The server used to log an error for every entry in a document's metadata log that contained an unsupported command. As we develop, we occasionally end up processing such logs with older versions of the code that don't support every command. To avoid filling the logs with errors, we now ignore any errors of a given type beyond the first one encountered for a given document.
* We've fixed an issue with read-only spreadsheets that was introduced in our previous release. An overlay intended to prevent users from interacting with the spreadsheet while disconnected was incorrectly applied to spreadsheets in read-only mode, preventing users from copying their data.
* Clients send "pin commands" to the server to instruct it to count a document against their quota and to preserve its data even if it's considered inactive. We realized that the client wasn't including todo-lists in its list of pads to pin and have updated the client to do so.
# LabradorDuck release (3.11.0)
## Goals
For this release we aimed to phase in two major features that we've been anticipating for a while: "history trim" and "safe links".
History trim will allow users to remove the old versions of their documents which continue to count against their storage quotas. It will be formally introduced in our next release, even though its server-side components are all ready. We had to reorganize and modify a lot of our server code, so we wanted to wait and make sure there were no regressions in our existing functionality before moving ahead.
We're introducing the concept of "safe links" in CryptPad. Users can continue to share links to documents which include the cryptographic secrets necessary to read or edit them, but whenever possible we will replace those secrets with a document id. This will make it less likely for encryption keys to be exposed to third parties through invasive browser extensions or passive behaviour like history synchronization across devices.
## Update notes
This release features a few changes to the server:
1. The "legal notice" feature which we included in the previous release turned out to be incorrect. We've since fixed it. We document this functionality [here](https://github.com/xwiki-labs/cryptpad/blob/e8b905282a2cde826ad9100dcad6b59a50c70e8b/www/common/application_config_internal.js#L35-L41), but you'll need to implement the recommended changes in `cryptpad/customize/application_config.js` for best effect.
2. We've dropped server-side support for the `retainData` attribute in `cryptpad/config/config.js`. Previously you could configure CryptPad to delete unpinned, inactive data immediately or to move it into an archive for a configurable retention period. We've removed the option to delete data outright, since it introduces additional complexity in the server which we don't regularly test. We also figure that administrators will appreciate this default in the event of a bug which incorrectly flags data as inactive.
3. We've fixed an incorrect line in [the example nginx configuration file](https://github.com/xwiki-labs/cryptpad/commit/1be01c07eee3431218d0b40a58164f60fec6df31). If you're using nginx as a reverse proxy for your CryptPad instance you should correct this line. It sets the Content-Security-Policy headers for the sandboxed iframe, which provides an additional layer of security for users in the event of a cross-site scripting (XSS) vulnerability within CryptPad. If your instance stops working after applying this change, it is likely that you have not configured it to use a secondary domain for its sandbox. See [this section of `cryptpad/config/config.example.js`](https://github.com/xwiki-labs/cryptpad/blob/c388641479128303363d8a4247f64230c08a7264/config/config.example.js#L94-L96) for more information, as well as the configuration sketch after this list.
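To make the sandbox requirement in point 3 more concrete, here is a minimal sketch of the relevant settings in `cryptpad/config/config.js`; the domains are placeholders and should match the `$main_domain` and `$sandbox_domain` variables in your nginx configuration.

```javascript
// sketch of the sandbox-related settings in cryptpad/config/config.js
// (the domains below are placeholders)
module.exports = {
    // the URL that clients will enter to load your instance (the 'main' domain)
    httpUnsafeOrigin: 'https://cryptpad.example.com',

    // a different domain (a subdomain is sufficient) used only to load the
    // sandboxed user interface; customize and uncomment this for production
    // httpSafeOrigin: 'https://sandbox.example.com',
};
```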
Otherwise, deploying the new code should be fairly simple:
1. stop your server
2. fetch the latest code from the git repository
3. update your server dependencies with `npm install`
4. update your clientside dependencies with `bower update`
5. start your server
## Features
* We've slightly reorganized the _settings_ page to include a new "Confidentiality" section. It includes a checkbox to enable "safe links", which will remove the cryptographic secrets from your documents' URLs whenever possible. It is currently off by default but will most likely default to true in the near future. Otherwise, the settings page has an updated layout which is generally easier to read.
* We've removed the "Owned pads" category from the CryptDrive application. It was included to provide an overview of pads that you could delete when we first introduced that functionality; however, we've since realized that it is not very useful.
* We implemented the ability to convert a regular folder in your drive into a _shared folder_ several months ago, but disabled it when we discovered that it had some bugs. We finally got around to fixing those bugs and so it is officially ready for public use.
* We've continued to make little changes to improve the discoverability of CryptPad's social features. Unregistered users that view another user's profile are now informed that they can send that profile's owner a contact request once they register.
* You may remember that CryptPad's contacts used to be called "friends". We've changed this terminology to reflect that you might work with people with whom you do not have a close personal relationship.
* We analyzed CryptPad for possible vectors of social abuse as part of our _Teams_ project, sponsored by the NLnet Foundation. During this audit we identified the direct messaging/notifications system as the main avenue for abuse. We added the ability to mute users, but realized it could be difficult to find the profile page of the person you want to mute. As of this release, any notification triggered by a remote user's actions will include their avatar and a link to their profile. If you find a user's behaviour abusive or annoying you can go straight to their profile and mute them.
* We've made a small improvement to the admin panel's support ticket view: tickets which have not yet received a response are now highlighted in red.
* The login/register pages had a minor bug where the loading screen was not correctly displayed the second time you tried to enter your password. This was because the key derivation function which unlocks the corresponding user credentials was keeping the CPU busy and preventing an animation from running. It has since been corrected.
* We've continued to make some small but important changes to various UI elements that are reused throughout the platform. The password field in the _pad properties dialog_ has been tweaked for better color contrast. Similarly, the small notice that pops up in the bottom right hand corner to prompt you to store a pad in your drive has been restyled. We've also implemented a second variation on this popup to display general information not directly related to the current pad. Both of these UI elements better match the general appearance of the rest of the platform and represent a continued effort to improve its visual consistency.
* The spreadsheet editor has received some attention in the last few weeks as well. It is now able to gracefully resume a session when you reconnect to the server after an interruption. Likewise, the locking system which prevents two users from editing a cell at the same time is now significantly faster, and it is disabled entirely if you're editing alone. Now that it's possible for unregistered users to edit spreadsheets, we've also improved the color contrast of the toolbar message which prompts users to register so that their spreadsheets aren't deleted due to inactivity.
* The "file upload status table" has received some attention as well, in response to [issue 496](https://github.com/xwiki-labs/cryptpad/issues/496). When you upload many files to CryptPad in a row you'll see them all displayed in a table which will include a scrollbar if necessary.
## Bug fixes
* [Issue 441](https://github.com/xwiki-labs/cryptpad/issues/441 "Other users writing in pad hiijacks chat window") has been fixed.
* We found a bug that affected encrypted files saved to your CryptDrive via the right-click menu. The files were saved in an incorrect format and were unusable. They should behave normally now.
* Finally, we identified a race condition whereby if two users sent each other contact requests at the same time the request might not be accepted correctly. This process should now be much more reliable.
# Kouprey release (3.10.0)
## Goals
For this release we aimed to finish the last major feature of our CryptPad Teams project, as well as some long-awaited features that we planned to demo at FOSDEM 2020.
## Update notes
The CryptPad repository's _docs_ directory now includes a _systemd service file_ which you can use to ensure that CryptPad stays up and running. We're working on some step-by-step documentation to describe how to make use of it, but for now you can probably find some instructions by searching the web.
We've also updated the provided example.nginx.conf to include a minor but important change to the CSP settings for our OnlyOffice spreadsheet integration.
Up until now we have not been deleting unowned encrypted files from our server. As of this release `cryptpad/scripts/evict-inactive.js` includes logic to identify inactive, unpinned files. Identified files are first moved to your instance's _archive_ directory for a configurable period, after which they are deleted. This script is not run automatically, so if you haven't configured a cron job to run it periodically, inactive files will not be removed. We recommend running the script once per day at a time when you expect your server to be relatively idle, since it consumes a non-negligible amount of server resources. The relevant retention settings are sketched below.
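The eviction and archival periods mentioned above are governed by two values in `cryptpad/config/config.js`. A minimal sketch using the documented defaults:

```javascript
// sketch of the retention settings in cryptpad/config/config.js
module.exports = {
    // unpinned documents are considered inactive after this many days
    inactiveTime: 90,

    // archived data is kept for this many days before being removed permanently
    archiveRetentionTime: 15,
};
```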
Finally, in case you live in a political jurisdiction that requires web site administrators to display their legal information, we've made it easier to add a link to a custom page. See `cryptpad/www/common/application_config_internal.js` for details, particularly the comments above `config.imprint`.
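As a rough sketch, such a link can be configured in `cryptpad/customize/application_config.js`; `config.imprint` accepts either `true` (linking to the instance's own imprint page) or a URL, and the override pattern and example URL below are illustrative assumptions rather than a verbatim excerpt.

```javascript
// customize/application_config.js — sketch of enabling the legal notice link
define(['/common/application_config_internal.js'], function (AppConfig) {
    // either link to the built-in /imprint.html page...
    AppConfig.imprint = true;
    // ...or point the footer link at an existing page elsewhere:
    // AppConfig.imprint = 'https://example.com/legal-notice.html';
    return AppConfig;
});
```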
To update from v3.9.0:
1. update the CSP settings in your reverse proxy's configuration file to match those in `example.nginx.conf`
* don't forget to reload your server to ensure that your changes are deployed
2. stop your API server
3. pull the latest server/client code with `git pull origin master`
4. install the latest clientside dependencies with `bower update`
5. relaunch your server
## Features
* Owned pads can now be shared in _self-destruct_ mode as an additional option in the _access rights_ section of the _share menu_.
* to use self-destructing pads:
1. select `View once and self-destruct`
2. share the _self-destructing pad link_ directly with a contact or create and copy a link
3. recipients who open the link will land on a warning page informing them about what is about to happen
4. once they click through, they'll see the content and it will automatically be deleted from the server
5. opening the same link a second time will not yield any content
* note that deletion affects the original document you chose to share; this mode does not create a copy
* We no longer consider spreadsheets to be a BETA application!
* we've been using them for some time and while there are still points to improve we consider them stable enough for regular use
* this change in status is due to a few big updates:
1. we've integrated a recent version of OnlyOffice in which a number of bugs were fixed
2. we've enabled the use of spreadsheets for unregistered users, though registration is still free and will provide a better experience
3. it's now possible to upload encrypted images into your spreadsheets, in case you're the type of person that puts images in spreadsheets
4. you can also import and export spreadsheets between CryptPad's internal format and XLSX. This conversion is run entirely in your browser, so your documents stay private. Unfortunately it relies on some new features that are not available in all browsers. Chrome currently supports it, and we expect Firefox to enable support as of February 11th, 2020
* Finally, we've continued to receive contributions from our numerous translators (via https://weblate.cryptpad.fr) in the following languages (alphabetical order):
* Catalan
* Finnish
* German
* Italian
* Spanish
## Bug fixes
* We found and fixed an incorrect usage of the pinned-data API in `scripts/check-account-deletion.js`.
* We also updated an incorrect client-side test in /assert/.
* A minor bug in our CSS caching system caused some content to be unnecessarily recompiled. We've implemented a fix which should speed up loading time.
# JamaicanMonkey release (3.9.0)
## Goals

38
Jenkinsfile

@ -0,0 +1,38 @@
pipeline {
environment {
registry = 'https://registry.hub.docker.com'
registryCredential = 'dockerhub_jcabillot'
dockerImage = 'jcabillot/cryptpad'
}
agent any
triggers {
cron('@midnight')
}
stages {
stage('Clone repository') {
steps{
checkout scm
}
}
stage('Build image') {
steps{
sh 'docker build --force-rm=true --no-cache=true --pull -t ${dockerImage} .'
}
}
stage('Deploy Image') {
steps{
script {
withCredentials([usernamePassword(credentialsId: 'dockerhub_jcabillot', usernameVariable: 'DOCKER_USER', passwordVariable: 'DOCKER_PASS')]) {
sh 'docker login --username ${DOCKER_USER} --password ${DOCKER_PASS}'
sh 'docker push ${dockerImage}'
}
}
}
}
}
}

4
bower.json

@ -30,8 +30,8 @@
"secure-fabric.js": "secure-v1.7.9",
"hyperjson": "~1.4.0",
"chainpad-crypto": "^0.2.0",
"chainpad-listmap": "^0.7.0",
"chainpad": "^5.1.0",
"chainpad-listmap": "^0.9.0",
"chainpad": "^5.2.0",
"file-saver": "1.3.1",
"alertifyjs": "1.0.11",
"scrypt-async": "1.2.0",

347
config/config.example.js

@ -1,68 +1,118 @@
/*@flow*/
/*
globals module
*/
var _domain = 'http://localhost:3000/';
// You can `kill -USR2` the node process and it will write out a heap dump.
// If your system doesn't support dumping, comment this out and install with
// `npm install --production`
// See: https://strongloop.github.io/strongloop.com/strongblog/how-to-heap-snapshots/
/* globals module */
// to enable this feature, uncomment the line below:
// require('heapdump');
/* DISCLAIMER:
// we prepend a space because every usage expects it
// requiring admins to preserve it is unnecessarily confusing
var domain = ' ' + _domain;
There are two recommended methods of running a CryptPad instance:
// Content-Security-Policy
var baseCSP = [
"default-src 'none'",
"style-src 'unsafe-inline' 'self' " + domain,
"font-src 'self' data:" + domain,
1. Using a standalone nodejs server without HTTPS (suitable for local development)
2. Using NGINX to serve static assets and to handle HTTPS for API server's websocket traffic
/* child-src is used to restrict iframes to a set of allowed domains.
* connect-src is used to restrict what domains can connect to the websocket.
*
* it is recommended that you configure these fields to match the
* domain which will serve your CryptPad instance.
*/
"child-src blob: *",
// IE/Edge
"frame-src blob: *",
We do not officially recommend or support Apache, Docker, Kubernetes, Traefik, or any other configuration.
Support requests for such setups should be directed to their authors.
/* this allows connections over secure or insecure websockets
if you are deploying to production, you'll probably want to remove
the ws://* directive, and change '*' to your domain
*/
"connect-src 'self' ws: wss: blob:" + domain,
If you're having difficulty difficulty configuring your instance
we suggest that you join the project's IRC/Matrix channel.
// data: is used by codemirror
"img-src 'self' data: blob:" + domain,
"media-src * blob:",
If you don't have any difficulty configuring your instance and you'd like to
support us for the work that went into making it pain-free we are quite happy
to accept donations via our opencollective page: https://opencollective.com/cryptpad
// for accounts.cryptpad.fr authentication and cross-domain iframe sandbox
"frame-ancestors *",
""
];
*/
module.exports = {
/* CryptPad is designed to serve its content over two domains.
* Account passwords and cryptographic content is handled on the 'main' domain,
* while the user interface is loaded on a 'sandbox' domain
* which can only access information which the main domain willingly shares.
*
* In the event of an XSS vulnerability in the UI (that's bad)
* this system prevents attackers from gaining access to your account (that's good).
*
* Most problems with new instances are related to this system blocking access
* because of incorrectly configured sandboxes. If you only see a white screen
* when you try to load CryptPad, this is probably the cause.
*
* PLEASE READ THE FOLLOWING COMMENTS CAREFULLY.
*
*/
/* httpUnsafeOrigin is the URL that clients will enter to load your instance.
* Any other URL that somehow points to your instance is supposed to be blocked.
* The default provided below assumes you are loading CryptPad from a server
* which is running on the same machine, using port 3000.
*
* In a production instance this should be available ONLY over HTTPS
* using the default port for HTTPS (443) ie. https://cryptpad.fr
* In such a case this should be handled by NGINX, as documented in
* cryptpad/docs/example.nginx.conf (see the $main_domain variable)
*
*/
httpUnsafeOrigin: 'http://localhost:3000/',
/* httpSafeOrigin is the URL that is used for the 'sandbox' described above.
* If you're testing or developing with CryptPad on your local machine then
* it is appropriate to leave this blank. The default behaviour is to serve
* the main domain over port 3000 and to serve the content over port 3001.
*
* This is not appropriate in a production environment where invasive networks
* may filter traffic going over abnormal ports.
* To correctly configure your production instance you must provide a URL
* with a different domain (a subdomain is sufficient).
* It will be used to load the UI in our 'sandbox' system.
*
* This value corresponds to the $sandbox_domain variable
* in the example nginx file.
*
* CUSTOMIZE AND UNCOMMENT THIS FOR PRODUCTION INSTALLATIONS.
*/
// httpSafeOrigin: "https://some-other-domain.xyz",
/* httpAddress specifies the address on which the nodejs server
* should be accessible. By default it will listen on 127.0.0.1
* (IPv4 localhost on most systems). If you want it to listen on
* all addresses, including IPv6, set this to '::'.
*
*/
//httpAddress: '::',
/* httpPort specifies on which port the nodejs server should listen.
* By default it will serve content over port 3000, which is suitable
* for both local development and for use with the provided nginx example,
* which will proxy websocket traffic to your node server.
*
*/
//httpPort: 3000,
/* httpSafePort allows you to specify an alternative port from which
* the node process should serve sandboxed assets. The default value is
* that of your httpPort + 1. You probably don't need to change this.
*
*/
//httpSafePort: 3001,
/* CryptPad will launch a child process for every core available
* in order to perform CPU-intensive tasks in parallel.
* Some host environments may have a very large number of cores available
* or you may want to limit how much computing power CryptPad can take.
* If so, set 'maxWorkers' to a positive integer.
*/
// maxWorkers: 4,
module.exports = {
/* =====================
* Admin
* ===================== */
/*
* CryptPad now contains an administration panel. Its access is restricted to specific
* CryptPad contains an administration panel. Its access is restricted to specific
* users using the following list.
* To give access to the admin panel to a user account, just add their user id,
* which can be found on the settings page for registered users.
* Entries should be strings separated by a comma.
*/
/*
adminKeys: [
//"https://my.awesome.website/user/#/1/cryptpad-user1/YZgXQxKR0Rcb6r6CmxHPdAGLVludrAF2lEnkbx1vVOo=",
],
*/
/* CryptPad's administration panel includes a "support" tab
* wherein administrators with a secret key can view messages
@ -77,118 +127,77 @@ module.exports = {
*/
// supportMailboxPublicKey: "",
/* =====================
* Infra setup
* ===================== */
// the address you want to bind to, :: means all ipv4 and ipv6 addresses
// this may not work on all operating systems
httpAddress: '::',
// the port on which your httpd will listen
httpPort: 3000,
// This is for allowing the cross-domain iframe to function when developing
httpSafePort: 3001,
// This is for deployment in production, CryptPad uses a separate origin (domain) to host the
// cross-domain iframe. It can simply host the same content as CryptPad.
// httpSafeOrigin: "https://some-other-domain.xyz",
httpUnsafeOrigin: domain,
/* Your CryptPad server will share this value with clients
* via its /api/config endpoint.
/* We're very proud that CryptPad is available to the public as free software!
* We do, however, still need to pay our bills as we develop the platform.
*
* If you want to host your API and asset servers on different hosts
* specify a URL for your API server websocket endpoint, like so:
* wss://api.yourdomain.com/cryptpad_websocket
* By default CryptPad will prompt users to consider donating to
* our OpenCollective campaign. We publish the state of our finances periodically
* so you can decide for yourself whether our expenses are reasonable.
*
* Otherwise, leave this commented and your clients will use the default
* websocket (wss://yourdomain.com/cryptpad_websocket)
* You can disable any solicitations for donations by setting 'removeDonateButton' to true,
* but we'd appreciate it if you didn't!
*/
//externalWebsocketURL: 'wss://api.yourdomain.com/cryptpad_websocket
//removeDonateButton: false,
/* CryptPad can be configured to send customized HTTP Headers
* These settings may vary widely depending on your needs
* Examples are provided below
/* CryptPad will display a point of contact for your instance on its contact page
* (/contact.html) if you provide it below.
*/
httpHeaders: {
"X-XSS-Protection": "1; mode=block",
"X-Content-Type-Options": "nosniff",
"Access-Control-Allow-Origin": "*"
},
contentSecurity: baseCSP.join('; ') +
"script-src 'self'" + domain,
adminEmail: 'i.did.not.read.my.config@cryptpad.fr',
// CKEditor and OnlyOffice require significantly more lax content security policy in order to function.
padContentSecurity: baseCSP.join('; ') +
"script-src 'self' 'unsafe-eval' 'unsafe-inline'" + domain,
/*
* By default, CryptPad contacts one of our servers once a day.
* This check-in will also send some very basic information about your instance including its
* version and the adminEmail so we can reach you if we are aware of a serious problem.
* We will never sell it or send you marketing mail.
*
* If you want to block this check-in and remain set 'blockDailyCheck' to true.
*/
//blockDailyCheck: false,
/* Main pages
* add exceptions to the router so that we can access /privacy.html
* and other odd pages
/*
* By default users get 50MB of storage by registering on an instance.
* You can set this value to whatever you want.
*
* hint: 50MB is 50 * 1024 * 1024
*/
mainPages: [
'index',
'privacy',
'terms',
'about',
'contact',
'what-is-cryptpad',
'features',
'faq',
'maintenance'
],
//defaultStorageLimit: 50 * 1024 * 1024,
/* =====================
* Subscriptions
* STORAGE
* ===================== */
/* Limits, Donations, Subscriptions and Contact
*
* By default, CryptPad limits every registered user to 50MB of storage. It also shows a
* subscribe button which allows them to upgrade to a paid account. We handle payment,
* and keep 50% of the proceeds to fund ongoing development.
*
* You can:
* A: leave things as they are
* B: disable accounts but display a donate button
* C: hide any reference to paid accounts or donation
/* Pads that are not 'pinned' by any registered user can be set to expire
* after a configurable number of days of inactivity (default 90 days).
* The value can be changed or set to false to remove expiration.
* Expired pads can then be removed using a cron job calling the
* `evict-inactive.js` script with node
*
* If you chose A then there's nothing to do.
* If you chose B, set 'allowSubscriptions' to false.
* If you chose C, set 'removeDonateButton' to true
* defaults to 90 days if nothing is provided
*/
allowSubscriptions: true,
removeDonateButton: false,
/*
* By default, CryptPad also contacts our accounts server once a day to check for changes in
* the people who have accounts. This check-in will also send the version of your CryptPad
* instance and your email so we can reach you if we are aware of a serious problem. We will
* never sell it or send you marketing mail. If you want to block this check-in and remain
* completely invisible, set this and allowSubscriptions both to false.
*/
adminEmail: 'i.did.not.read.my.config@cryptpad.fr',
//inactiveTime: 90, // days
/* Sales coming from your server will be identified by your domain
/* CryptPad archives some data instead of deleting it outright.
* This archived data still takes up space and so you'll probably still want to
* remove these files after a brief period.
*
* If you are using CryptPad in a business context, please consider taking a support contract
* by contacting sales@cryptpad.fr
* cryptpad/scripts/evict-inactive.js is intended to be run daily
* from a crontab or similar scheduling service.
*
* The intent with this feature is to provide a safety net in case of accidental
* deletion. Set this value to the number of days you'd like to retain
* archived data before it's removed permanently.
*
* defaults to 15 days if nothing is provided
*/
myDomain: _domain,
//archiveRetentionTime: 15,
/*
* If you are using CryptPad internally and you want to increase the per-user storage limit,
* change the following value.
*
* Please note: This limit is what makes people subscribe and what pays for CryptPad
* development. Running a public instance that provides a "better deal" than cryptpad.fr
* is effectively using the project against itself.
/* Max Upload Size (bytes)
* this sets the maximum size of any one file uploaded to the server.
* anything larger than this size will be rejected
* defaults to 20MB if no value is provided
*/
defaultStorageLimit: 50 * 1024 * 1024,
//maxUploadSize: 20 * 1024 * 1024,
/*
* CryptPad allows administrators to give custom limits to their friends.
@ -198,8 +207,8 @@ module.exports = {
*
* hint: 1GB is 1024 * 1024 * 1024 bytes
*/
/*
customLimits: {
/*
"https://my.awesome.website/user/#/1/cryptpad-user1/YZgXQxKR0Rcb6r6CmxHPdAGLVludrAF2lEnkbx1vVOo=": {
limit: 20 * 1024 * 1024 * 1024,
plan: 'insider',
@ -210,69 +219,15 @@ module.exports = {
plan: 'insider',
note: 'storage space donated by my.awesome.website'
}
*/
},
*/
/* =====================
* STORAGE
* ===================== */
/* By default the CryptPad server will run scheduled tasks every five minutes
* If you want to run scheduled tasks in a separate process (like a crontab)
* you can disable this behaviour by setting the following value to true
*/
disableIntegratedTasks: false,
/* Pads that are not 'pinned' by any registered user can be set to expire
* after a configurable number of days of inactivity (default 90 days).
* The value can be changed or set to false to remove expiration.
* Expired pads can then be removed using a cron job calling the
* `delete-inactive.js` script with node
*/
inactiveTime: 90, // days
/* CryptPad can be configured to remove inactive data which has not been pinned.
* Deletion of data is always risky and as an operator you have the choice to
* archive data instead of deleting it outright. Set this value to true if
* you want your server to archive files and false if you want to keep using
* the old behaviour of simply removing files.
/* Users with premium accounts (those with a plan included in their customLimit)
* can benefit from an increased upload size limit. By default they are restricted to the same
* upload size as any other registered user.
*
* WARNING: this is not implemented universally, so at the moment this will
* only apply to the removal of 'channels' due to inactivity.
*/
retainData: true,
/* As described above, CryptPad offers the ability to archive some data
* instead of deleting it outright. This archived data still takes up space
* and so you'll probably still want to remove these files after a brief period.
* The intent with this feature is to provide a safety net in case of accidental
* deletion. Set this value to the number of days you'd like to retain
* archived data before it's removed permanently.
*
* If 'retainData' is set to false, there will never be any archived data
* to remove.
*/
archiveRetentionTime: 15,
/* Max Upload Size (bytes)
* this sets the maximum size of any one file uploaded to the server.
* anything larger than this size will be rejected
*/
maxUploadSize: 20 * 1024 * 1024,
/* =====================
* HARDWARE RELATED
* ===================== */
/* CryptPad's file storage adaptor closes unused files after a configurable
* number of milliseconds (default 30000 (30 seconds))
*/
channelExpirationMs: 30000,
/* CryptPad's file storage adaptor is limited by the number of open files.
* When the adaptor reaches openFileLimit, it will clean up older files
*/
openFileLimit: 2048,
//premiumUploadSize: 100 * 1024 * 1024,
/* =====================
* DATABASE VOLUMES

2
customize.dist/ckeditor-config.js

@ -11,6 +11,8 @@ CKEDITOR.editorConfig = function( config ) {
config.removePlugins= 'resize,elementspath';
config.resize_enabled= false; //bottom-bar
config.extraPlugins= 'autolink,colorbutton,colordialog,font,indentblock,justify,mediatag,print,blockbase64,mathjax,wordcount';
// FIXME translation for default? updating to a newer CKEditor seems like it will add 'default' by default
config.fontSize_sizes = '(Default)/unset;8/8px;9/9px;10/10px;11/11px;12/12px;14/14px;16/16px;18/18px;20/20px;22/22px;24/24px;26/26px;28/28px;36/36px;48/48px;72/72px';
config.toolbarGroups= [
// {"name":"clipboard","groups":["clipboard","undo"]},
//{"name":"editing","groups":["find","selection"]},

69
customize.dist/loading.js

@ -3,7 +3,6 @@
define([], function () {
var loadingStyle = (function(){/*
#cp-loading {
transition: opacity 0.75s, visibility 0s 0.75s;
visibility: visible;
position: fixed;
z-index: 10000000;
@ -13,7 +12,8 @@ define([], function () {
right: 0px;
background: linear-gradient(to right, #326599 0%, #326599 50%, #4591c4 50%, #4591c4 100%);
color: #fafafa;
font-size: 1.5em;
font-size: 1.3em;
line-height: 120%;
opacity: 1;
display: flex;
flex-flow: column;
@ -23,6 +23,7 @@ define([], function () {
#cp-loading.cp-loading-hidden {
opacity: 0;
visibility: hidden;
transition: opacity 0.75s, visibility 0s 0.75s;
}
#cp-loading .cp-loading-logo {
height: 300px;
@ -77,14 +78,12 @@ define([], function () {
background: #FFF;
padding: 20px;
width: 100%;
color: #000;
text-align: center;
color: #3F4141;
text-align: left;
display: none;
}
#cp-loading-password-prompt {
font-size: 18px;
}
#cp-loading-password-prompt .cp-password-error {
#cp-loading-password-prompt p.cp-password-error {
color: white;
background: #9e0000;
padding: 5px;
@ -94,24 +93,53 @@ define([], function () {
text-align: left;
margin-bottom: 15px;
}
#cp-loading-burn-after-reading .cp-password-info {
margin-bottom: 15px;
}
p.cp-password-info{
text-align: left;
}
#cp-loading-password-prompt .cp-password-form {
display: flex;
justify-content: space-around;
flex-wrap: wrap;
}
#cp-loading-password-prompt .cp-password-form button,
#cp-loading-password-prompt .cp-password-form .cp-password-input {
#cp-loading-password-prompt .cp-password-form button{
background-color: #4591c4;
color: white;
border: 1px solid #4591c4;
}
.cp-password-input{
font-size:16px;
border: 1px solid #4591c4;
background-color: white;
border-radius 0;
}
.cp-password-form button{
padding: 8px 12px;
font-weight: bold;
text-transform: uppercase;
}
#cp-loading-password-prompt .cp-password-form{
width: 100%;
}
#cp-loading-password-prompt .cp-password-form .cp-password-container {
flex-shrink: 1;
min-width: 0;
}
#cp-loading-password-prompt .cp-password-form .cp-password-container .cp-password-reveal{
color: #4591c4;
padding: 0px 24px;
}
#cp-loading-password-prompt .cp-password-form input {
flex: 1;
padding: 0 5px;
padding: 12px;
min-width: 0;
text-overflow: ellipsis;
}
@ -119,7 +147,7 @@ define([], function () {
background-color: #326599;
}
#cp-loading-password-prompt ::placeholder {
color: #d9d9d9;
color: #999999;
opacity: 1;
}
#cp-loading-password-prompt :-ms-input-placeholder {
@ -154,7 +182,7 @@ define([], function () {
background: #222;
color: #fafafa;
text-align: center;
font-size: 1.5em;
font-size: 1.3em;
opacity: 0.7;
font-family: 'Open Sans', 'Helvetica Neue', sans-serif;
padding: 15px;
@ -201,6 +229,19 @@ define([], function () {
animation-timing-function: cubic-bezier(.6,0.15,0.4,0.85);
}
button.primary{
border: 1px solid #4591c4;
padding: 8px 12px;
text-transform: uppercase;
background-color: #4591c4;
color: white;
font-weight: bold;
}
button.primary:hover{
background-color: rgb(52, 118, 162);
}
*/}).toString().slice(14, -3);
var urlArgs = window.location.href.replace(/^.*\?([^\?]*)$/, function (all, x) { return x; });
var elem = document.createElement('div');

10
customize.dist/login.js

@ -180,7 +180,7 @@ define([
return;
}
console.error(decryptedBlock);
//console.error(decryptedBlock);
res.blockInfo = decryptedBlock;
}));
}).nThen(function (waitFor) {
@ -275,7 +275,7 @@ define([
if (res.blockInfo) {
opt = loginOptionsFromBlock(res.blockInfo);
userHash = res.blockInfo.User_hash;
console.error(opt, userHash);
//console.error(opt, userHash);
} else {
console.log("allocating random bytes for a new user object");
opt = allocateBytes(Nacl.randomBytes(Exports.requiredBytes));
@ -293,7 +293,7 @@ define([
return void cb('MODERN_REGISTRATION_INIT');
}
console.error(JSON.stringify(rt.proxy));
//console.error(JSON.stringify(rt.proxy));
// export the realtime object you checked
RT = rt;
@ -458,6 +458,7 @@ define([
UI.removeLoadingScreen(function () {
UI.alert(Messages.login_noSuchUser, function () {
hashing = false;
$('#password').focus();
});
});
break;
@ -465,6 +466,7 @@ define([
UI.removeLoadingScreen(function () {
UI.alert(Messages.login_invalUser, function () {
hashing = false;
$('#password').focus();
});
});
break;
@ -472,6 +474,7 @@ define([
UI.removeLoadingScreen(function () {
UI.alert(Messages.login_invalPass, function () {
hashing = false;
$('#password').focus();
});
});
break;
@ -482,6 +485,7 @@ define([
]);
UI.alert(warning, function () {
hashing = false;
$('#password').focus();
});
});
break;

3
customize.dist/messages.js

@ -5,13 +5,16 @@ var map = {
'de': 'Deutsch',
'el': 'Ελληνικά',
'es': 'Español',
'fi': 'Suomalainen',
'fr': 'Français',
//'hi': 'हिन्दी',
'it': 'Italiano',
'nb': 'Norwegian Bokmål',
//'pl': 'Polski',
'pt-br': 'Português do Brasil',
'ro': 'Română',
'ru': 'Русский',
//'sv': 'Svenska',
//'te': 'తెలుగు',
'zh': '繁體中文',
//'nl': 'Nederlands'

8
customize.dist/pages.js

@ -1,9 +1,10 @@
define([
'/common/hyperscript.js',
'/common/common-language.js',
'/customize/application_config.js',
'/customize/messages.js',
'jquery',
], function (h, Language, Msg, $) {
], function (h, Language, AppConfig, Msg, $) {
var Pages = {};
Pages.setHTML = function (e, html) {
@ -58,6 +59,8 @@ define([
return h('a', attrs, text);
};
var imprintUrl = AppConfig.imprint && (typeof(AppConfig.imprint) === "boolean" ?
'/imprint.html' : AppConfig.imprint);
Pages.infopageFooter = function () {
return h('footer', [
h('div.container', [
@ -94,6 +97,7 @@ define([
footerCol('footer_legal', [
footLink('/terms.html', 'footer_tos'),
footLink('/privacy.html', 'privacy'),
AppConfig.imprint ? footLink(imprintUrl, 'imprint') : undefined,
]),
/*footerCol('footer_contact', [
footLink('https://riot.im/app/#/room/#cryptpad:matrix.org', null, 'Chat'),
@ -103,7 +107,7 @@ define([
])*/
])
]),
h('div.cp-version-footer', "CryptPad v3.9.0 (JamaicanMonkey)")
h('div.cp-version-footer', "CryptPad v3.16.0 (Quagga)")
]);
};

53
customize.dist/src/less2/include/alertify.less

@ -72,6 +72,10 @@
z-index: 100000; // alertify container
font: @colortheme_app-font;
.cp-checkmark {
color: @cryptpad_text_col;
}
.cp-inline-alert-text {
flex: 1;
}
@ -168,6 +172,9 @@
margin-bottom: 0;
}
}
.cp-alertify-type-container {
overflow: visible !important;
}
.alertify-tabs {
max-height: 100%;
display: flex;
@ -179,6 +186,7 @@
margin-bottom: 10px;
box-sizing: content-box;
span {
.tools_unselectable();
font-size: 20px;
height: 40px;
line-height: 40px;
@ -187,12 +195,16 @@
border-left: 1px solid lighten(@alertify-base, 10%);
border-right: 1px solid lighten(@alertify-base, 10%);
cursor: pointer;
&:hover {
&:not(.disabled):hover {
background-color: @alertify-light-bg;
}
&.disabled {
color: #949494;
cursor: not-allowed;
}
}
span.alertify-tabs-active {
background-color: @alertify-fore;
background-color: @alertify-fore !important;
border-left: 1px solid @alertify-fore;
border-right: 1px solid @alertify-fore;
color: @alertify-base;
@ -265,7 +277,7 @@
margin-bottom: 15px;
}
button {
button:not('.pure-button') {
display: inline-block;
position: relative;
margin: 6px 8px;
@ -276,11 +288,17 @@
nav {
padding: @alertify_padding-base;
text-align: right;
button {
button, div.cp-button-confirm {
margin: 0px !important;
&:not(:first-child) {
&:not(:first-child):not(.left) {
margin-left: @alertify_padding-base !important;
}
&.left {
float: left;
}
}
div.cp-button-confirm {
vertical-align: middle;
}
}
}
@ -383,18 +401,13 @@
}
}
div.wide {
div.alertify-tabs {
p.msg:not(:last-child) {
border-bottom: 1px solid @alertify-fore;
}
}
.cp-share-columns {
display: flex;
flex-flow: row;
& > .cp-share-column {
width: 50%;
padding: 0 10px;
//padding: 0 10px;
position: relative;
&.contains-nav {
nav {
@ -411,7 +424,20 @@
}
}
&:first-child {
border-right: 1px solid @alertify-fore;
margin-right: @alertify_padding-base;
}
&:last-child {
margin-left: @alertify_padding-base;
}
}
& > .cp-share-column-mid {
display: flex;
align-items: center;
button {
width: 50px;
margin: 0;
min-width: 0;
font-size: 18px !important;
}
}
}
@ -436,6 +462,9 @@
i {
margin-right: 10px;
}
&.cp-alert-top {
margin-top: @alertify_padding-base;
}
&.alert-primary {
background-color: @alertify-base;
color: @alertify-fg;

52
customize.dist/src/less2/include/buttons.less

@ -10,7 +10,7 @@
@alertify-input-fg: @colortheme_modal-input-fg;
input:not(.form-control), textarea {
background-color: @alertify-input-fg;
// background-color: @alertify-input-fg;
color: @cryptpad_text_col;
border: 1px solid @alertify-input-bg;
width: 100%;
@ -23,6 +23,27 @@
}
}
input:not(.form-control) {
height: @variables_input-height;
}
div.cp-alertify-type {
display: flex;
input {
margin: 0;
flex: 1;
min-width: 0;
}
span {
button {
margin: 0;
height: 100%;
margin-left: -1px;
text-transform: unset !important;
}
}
}
textarea {
padding: 8px;
&[readonly] {
@ -31,6 +52,25 @@
}
}
div.cp-button-confirm {
display: inline-block;
button {
margin: 0;
}
.cp-button-timer {
height: 3px;
& > div {
height: 100%;
background-color: @colortheme_alertify-primary;
&.danger, &.btn-danger, &.danger-alt, &.btn-danger-alt {
background-color: @colortheme_alertify-red;
}
}
}
}
button.cp-button-confirm-placeholder {
margin-bottom: 3px !important;
}
button:not(.pure-button):not(.md-button):not(.mdl-button) {
@ -68,6 +108,7 @@
white-space: normal;
font-weight: bold;
}
&.danger, &.btn-danger {
background-color: @colortheme_alertify-red;
border-color: @colortheme_alertify-red-border;
@ -77,6 +118,15 @@
}
}
&.danger-alt, &.btn-danger-alt {
border-color: @colortheme_alertify-red;
color: @colortheme_alertify-red;
&:hover, &:active {
color: @colortheme_alertify-red-color;
background-color: contrast(@colortheme_modal-bg, darken(@colortheme_alertify-red, 10%), lighten(@colortheme_alertify-red, 10%));
}
}
&.safe, &.btn-safe {
background-color: @colortheme_alertify-green;
border-color: @colortheme_alertify-green-border;

4
customize.dist/src/less2/include/colortheme.less

@ -133,9 +133,9 @@
@colortheme_ooslide-color: #FFF;
@colortheme_ooslide-warn: #cd2532;
@colortheme_oocell-bg: #7e983f;
@colortheme_oocell-bg: #40865c;
@colortheme_oocell-color: #FFF;
@colortheme_oocell-warn: #cd2532;
@colortheme_oocell-warn: #ffbcc0;
@colortheme_kanban-bg: #8C4;
@colortheme_kanban-color: #000;

113
customize.dist/src/less2/include/corner.less

@ -4,9 +4,9 @@
--LessLoader_require: LessLoader_currentFile();
};
& {
@corner-button-ok: #2c9b00;
@corner-button-cancel: #990000;
@corner-link: #ffff7a;
@corner-blue: @colortheme_logo-1;
@corner-white: @colortheme_base;
@keyframes appear {
0% {
@ -27,21 +27,23 @@
.cp-corner-container {
position: absolute;
right: 0;
bottom: 0;
width: 300px;
height: 200px;
border-top-left-radius: 200px;
padding: 15px;
text-align: right;
background-color: @colortheme_logo-1;
color: @colortheme_base;
right: 10px;
bottom: 10px;
width: 350px;
padding: 10px;
background-color: fade(@corner-blue, 95%);
border: 1px solid @corner-blue;
color: @corner-white;
z-index: 9999;
transform-origin: bottom right;
animation: appear 0.8s ease-in-out;
box-shadow: 0 0 10px 0 @colortheme_logo-1;
//transform: scale(0.1);
//transform: scale(1);
//box-shadow: 0 0 10px 0 @corner-blue;
&.cp-corner-alt {
background-color: fade(@corner-white, 95%);
border: 1px solid @corner-blue;
color: @corner-blue;
}
h1, h2, h3 {
font-size: 1.5em;
@ -64,7 +66,7 @@
line-height: 15px;
display: none;
&:hover {
color: darken(@colortheme_base, 15%);
color: darken(@corner-white, 15%);
}
}
.cp-corner-minimize {
@ -86,46 +88,95 @@
}
}
&.cp-corner-big {
width: 400px;
height: 250px;
width: 500px;
}
.cp-corner-dontshow {
cursor: pointer;
.fa {
margin-right: 0.3em;
font-size: 1.1em;
}
&:hover {
color: darken(@corner-white, 10%);
}
}
&.cp-corner-alt {
.cp-corner-dontshow {
&:hover {
color: lighten(@corner-blue, 10%);
}
}
}
.cp-corner-actions {
min-height: 30px;
margin: 15px auto;
display: inline-block;
margin: 10px auto;
display: block;
text-align: right;
}
.cp-corner-footer {
font-style: italic;
font-size: 0.8em;
}
.cp-corner-footer, .cp-corner-text {
a {
color: @corner-link;
color: @corner-white;
text-decoration: underline;
&:hover {
color: darken(@corner-link, 20%);
color: darken(@corner-white, 10%);
}
}
}
&.cp-corner-alt a {
color: @corner-blue;
&:hover {
color: lighten(@corner-blue, 10%);
}
}
button {
border: 0px;
padding: 5px;
color: @colortheme_base;
margin-left: 5px;
color: @corner-white;
&:not(:first-child) {
margin-left: 10px;
}
outline: none;
text-transform: uppercase;
border: 1px solid @corner-white;
.fa, .cptools {
margin-right: 0.3em;
}
&.cp-corner-primary {
background-color: @corner-button-ok;
font-weight: bold;
background-color: @corner-white;
color: @corner-blue;
&:hover {
background-color: lighten(@corner-button-ok, 10%);
background-color: lighten(@corner-blue, 50%);
border-color: lighten(@corner-blue, 50%);
}
}
&.cp-corner-cancel {
background-color: @corner-button-cancel;
margin-left: 10px;
background-color: @corner-blue;
color: @corner-white;
&:hover {
background-color: darken(@corner-blue, 10%);
}
}
}
&.cp-corner-alt button {
border-color: @corner-blue;
&.cp-corner-primary {
background-color: @corner-blue;
color: @corner-white;
&:hover {
background-color: darken(@corner-blue, 10%);
border-color: darken(@corner-blue, 10%);
}
}
&.cp-corner-cancel {
background-color: @corner-white;
color: @corner-blue;
&:hover {
background-color: lighten(@corner-button-cancel, 10%);
background-color: lighten(@corner-blue, 50%);
}
}
}

3
customize.dist/src/less2/include/dropdown.less

@ -17,8 +17,7 @@
button {
.fa-caret-down {
margin-right: 0px;
margin-left: 5px;
margin-right: 1em !important;
}
* {
.tools_unselectable();

7
customize.dist/src/less2/include/fileupload.less

@ -14,9 +14,11 @@
right: 10vw;
bottom: 10vh;
box-sizing: border-box;
z-index: 1000000; //Z file upload table container
z-index: 100000; //Z file upload table container
display: none;
color: darken(@colortheme_drive-bg, 10%);
max-height: 180px;
overflow-y: auto;
@media screen and (max-width: @browser_media-medium-screen) {
left: 5vw; right: 5vw; bottom: 5vw;
@ -26,6 +28,9 @@
display: flex;
background-color: darken(@colortheme_modal-bg, 10%);
font-weight: bold;
position: sticky;
top: 0;
z-index: 1;
.cp-fileupload-header-title {
padding: 0.25em 0.5em;
flex-grow: 1;

49
customize.dist/src/less2/include/markdown.less

@ -1,3 +1,5 @@
@import (reference) "./tools.less";
.markdown_main() {
blockquote {
background: #e5e5e5;
@ -53,6 +55,53 @@
}
}
.markdown_cryptpad() {
word-wrap: break-word;
h1, h2, h3, h4, h5, h6 {
font-weight: bold;
padding-bottom: 0.3em;
border-bottom: 1px solid #eee;
}
li {
min-height: 22px;
}
.todo-list-item {
list-style: none;
position: relative;
.fa {
position: absolute;
margin-left: -17px;
margin-top: 4px;
}
}
media-tag {
cursor: pointer;
* {
max-width: 100%;
}
iframe[src$=".pdf"] {
width: 100%;
height: 80vh;
max-height: 90vh;
}
}
media-tag:empty {
width: 100px;
height: 100px;
display: inline-block;
border: 1px solid #BBB;
}
pre.mermaid {
svg {
max-width: 100%;
cursor: pointer;
.tools_unselectable();
}
}
}
.markdown_preformatted-code (@color: #333) {
pre > code {
display: block;

17
customize.dist/src/less2/include/modal.less

@ -1,5 +1,8 @@
@import (reference) "./colortheme-all.less";
@import (reference) "./variables.less";
@import (reference) './buttons.less';
.modal_base() {
font-family: @colortheme_font;
@ -24,6 +27,9 @@
.cp-modal-container {
display: none;
align-items: center;
justify-content: center;
z-index: 100000; //Z modal container
position: absolute;
top: 0;
@ -33,15 +39,19 @@
background-color: @colortheme_modal-dim;
.cp-modal {
.buttons_main();
background-color: @colortheme_modal-bg;
color: @colortheme_modal-fg;
box-shadow: @variables_shadow;
padding: @variables_padding;
position: absolute;
top: 15vh; bottom: 15vh;
left: 10vw; right: 10vw;
position: relative;
//top: 15vh; bottom: 15vh;
//left: 10vw; right: 10vw;
width: 90vw;
max-height: 95vh;
overflow: auto;
@ -70,6 +80,7 @@
background-color: @colortheme_modal-input-fg;
color: @cryptpad_text_col;
border: 1px solid @colortheme_modal-input;
width: auto;
}
}

120
customize.dist/src/less2/include/modals-ui-elements.less

@ -14,6 +14,9 @@
.radio-group {
display: flex;
flex-direction: row;
&:not(:last-child) {
margin-bottom: 8px;
}
.cp-radio {
margin-right: 30px;
}
@ -23,12 +26,70 @@
// Properties modal
.cp-app-prop {
margin-bottom: 10px;
.cp-app-prop-hint {
color: @cryptpad_text_col;
font-size: 0.8em;
margin-bottom: 5px;
}
.cp-app-prop-size-container {
height: 20px;
background-color: @colortheme_logo-2;
margin: 10px 0;
padding: 0;
div {
height: 20px;
margin: 0;
padding: 0;
background-color: #CCCCCC;
}
}
.cp-app-prop-size-legend {
color: @colortheme_modal-fg;
display: flex;
margin: 10px 0;
& > div {
display: flex;
align-items: center;
flex-basis: 50%;
margin: 0;
padding: 0;
}
.cp-app-prop-history-size-color, .cp-app-prop-contents-size-color {
display: inline-block;
height: 20px;
width: 20px;
margin-right: 10px;
}
.cp-app-prop-history-size-color {
background-color: #CCCCCC;
}
.cp-app-prop-contents-size-color {
background-color: @colortheme_logo-2;
}
}
}
.cp-app-prop-content {
color: @cryptpad_text_col;
}
// Access modal
.cp-overlay-container {
position: relative;
.cp-overlay {
position: absolute;
background-color: rgba(255,255,255,0.5);
top: 0;
bottom: 0;
left: 0;
right: 0;
}
}
.cp-access-margin-right {
margin-right: 5px !important;
}
// teams invite modal
.cp-teams-invite-block {
display: flex;
@ -50,4 +111,63 @@
.cp-teams-help {
margin-left: 10px;
}
// mediatag preview
#cp-mediatag-preview-modal {
.cp-modal {
display: flex;
justify-content: center;
.cp-mediatag-container {
width: 100%;
flex: 1;
min-width: 0;
overflow: auto;
media-tag {
& > * {
max-width: 100%;
max-height: 100%;
}
video, iframe {
margin-bottom: -5px;
}
button {
line-height: 1.5;
}
& > iframe {
width: 100%;
height: 100%;
min-height: 75vh;
}
& > .plain-text-reader {
white-space: pre-wrap;
text-align: left;
word-break: break-word;
color: @cryptpad_text_col;
padding: 5px;
}
}
pre.mermaid {
overflow: unset;
margin-bottom: 0;
}
.cp-spinner {
border-color: @colortheme_logo-1;
border-top-color: transparent;
}
}
.cp-mediatag-control {
align-self: center;
.fa {
margin: 10px;
cursor: pointer;
}
}
.cp-mediatag-outer {
display: flex;
height: 100%;
width: 100%;
align-items: center;
}
}
}
}

9
customize.dist/src/less2/include/notifications.less

@ -8,6 +8,7 @@
@notif-height: 50px;
.cp-notifications-container {
max-width: 300px;
width: 300px;
display: flex;
flex-flow: column;
& hr {
@ -16,6 +17,14 @@
.cp-notification {
min-height: @notif-height;
display: flex;
.cp-avatar {
.avatar_main(30px);
padding: 0 5px;
cursor: pointer;
&:hover {
background-color: rgba(0,0,0,0.1);
}
}
.cp-notification-content {
flex: 1;
align-items: stretch;

26
customize.dist/src/less2/include/sidebar-layout.less

@ -1,6 +1,7 @@
@import (reference) "/customize/src/less2/include/colortheme-all.less";
@import (reference) "/customize/src/less2/include/leftside-menu.less";
@import (reference) "/customize/src/less2/include/buttons.less";
@import (reference) "/customize/src/less2/include/browser.less";
@sidebar_button-width: 400px;
@ -73,6 +74,7 @@
padding: 5px 20px;
color: @rightside-color;
overflow: auto;
padding-bottom: 200px;
// Following rules are only in settings
.cp-sidebarlayout-element {
@ -96,7 +98,7 @@
}
}
margin-bottom: 20px;
.buttons_main();
.buttons_main();
}
[type="text"], [type="password"], button {
vertical-align: middle;
@ -107,6 +109,7 @@
.cp-sidebarlayout-input-block {
display: inline-flex;
width: @sidebar_button-width;
max-width: 100%;
input {
flex: 1;
//border-radius: 0.25em 0 0 0.25em;
@ -117,6 +120,8 @@
//border-radius: 0 0.25em 0.25em 0;
//border: 1px solid #adadad;
border-left: 0px;
height: @variables_input-height;
margin: 0 !important;
}
}
&>div {
@ -161,6 +166,25 @@
}
*/
}
@media screen and (max-width: @browser_media-medium-screen) {
flex-flow: column;
overflow: auto;
#cp-sidebarlayout-leftside {
width: 100% !important; // Override "narrow" mode
padding-bottom: 20px;
.cp-sidebarlayout-categories {
.cp-sidebarlayout-category {
margin: 0;
span.cp-sidebar-layout-category-name {
display: inline !important; // override "narrow" mode
}
}
}
}
#cp-sidebarlayout-rightside {
overflow: unset;
}
}
}
}

13
customize.dist/src/less2/include/tokenfield.less

@ -1,3 +1,4 @@
@import (reference) "./colortheme-all.less";
@import (reference) "./tools.less";
.tokenfield_main () {
@ -11,7 +12,6 @@
.tools_unselectable();
display: flex;
flex-wrap: wrap;
justify-content: space-around;
height: auto;
min-height: 34px;
padding-bottom: 0px;
@ -22,21 +22,19 @@
width: ~"calc(100% - 20px)";
.token {
box-sizing: border-box;
border-radius: 3px;
display: inline-flex;
align-items: center;
border: 1px solid #d9d9d9;
background-color: #ededed;
background-color: rgba(0, 0, 0, 0.1);
white-space: nowrap;
margin: 2px 0;
margin-right: 5px;
height: 24px;
vertical-align: middle;
cursor: default;
color: #222;
color: @cryptpad_text_col;
&:hover {
border-color: #b9b9b9;
background-color: rgba(0, 0, 0, 0.2);
}
&.invalid {
background: none;
@ -57,6 +55,7 @@
vertical-align: middle;
}
.close {
opacity: 1;
font-family: Arial;
display: inline-block;
line-height: 1.49em;

18
customize.dist/src/less2/include/toolbar.less

@ -97,6 +97,12 @@
.ckeditor_fix();
.cp-burn-after-reading {
text-align: center;
font-size: @colortheme_app-font-size !important;
margin: 0 !important;
}
.cp-markdown-toolbar {
height: @toolbar_line-height;
background-color: @toolbar-bg-color-l20;
@ -162,6 +168,7 @@
position: relative;
order: -2;
resize: horizontal;
z-index: 1;
#cp-app-contacts-container {
height: 100%;
}
@ -190,6 +197,7 @@
padding: 10px;
box-sizing: border-box;
order: -3;
z-index: 1;
.cp-toolbar-userlist-drawer-close {
position: absolute;
margin-top: -10px;
@ -363,7 +371,7 @@
width: 200px;
display: flex;
align-items: center;
.fa {
.fa, .cptools {
font-size: 32px;
min-width: 50px;
}
@ -1151,6 +1159,11 @@
margin-left: 11px;
}
}
&.fa-unlock-alt {
.cp-toolbar-drawer-element {
margin-left: 15px;
}
}
&.fa-question {
.cp-toolbar-drawer-element {
margin-left: 16px;
@ -1162,10 +1175,13 @@
}
order: 8;
&.fa-history { order: 1; }
&.fa-clone { order: 1; }
&.fa-download { order: 2; }
&.fa-upload { order: 3; }
&.fa-print { order: 4; }
&.fa-arrows-h { order: 5; }
&.fa-cog { order: 5; }
&.fa-paint-brush { order: 5; }
&.fa-info-circle { order: 6; }
&.fa-help { order: 7; }
}

23
customize.dist/src/less2/include/usergrid.less

@ -55,7 +55,7 @@
justify-content: center;
align-items: center;
padding: 5px;
margin: 3px;
margin: 3px !important;
cursor: default;
transition: order 0.5s, background-color 0.5s;
margin-top: 1px;
@ -109,6 +109,27 @@
color: @colortheme_alertify-primary-text;
}
}
.fa-times {
padding-left: 5px;
cursor: pointer;
height: 100%;
line-height: 25px;
color: @cryptpad_text_col;
&:hover {
color: lighten(@cryptpad_text_col, 10%);
}
}
}
&.list {
.cp-usergrid-user {
width: auto;
max-width: calc(100% - 6px);
overflow: hidden;
white-space: nowrap;
text-overflow: ellipsis;
display: inline-flex;
flex: unset;
}
}
}
}

1
customize.dist/src/less2/include/variables.less

@ -3,6 +3,7 @@
// Elements size
@variables_bar-height: 32px;
@variables_input-height: 38px;
// Used in modal.less and alertify.less
@variables_padding: 12px;

2
customize.dist/src/outer.css

@ -2,7 +2,7 @@ html, body {
margin: 0px;
padding: 0px;
}
#sbox-iframe, #sbox-share-iframe, #sbox-filePicker-iframe {
#sbox-iframe, #sbox-secure-iframe {
position: fixed;
top:0; left:0;
bottom:0; right:0;

14
customize.dist/translations/messages.hi.js

@ -0,0 +1,14 @@
/*
* You can override the translation text using this file.
* The recommended method is to make a copy of this file (/customize.dist/translations/messages.{LANG}.js)
in a 'customize' directory (/customize/translations/messages.{LANG}.js).
* If you want to check all the existing translation keys, you can open the internal language file
but you should not change it directly (/common/translations/messages.{LANG}.js)
*/
define(['/common/translations/messages.hi.js'], function (Messages) {
// Replace the existing keys in your copied file here:
// Messages.button_newpad = "New Rich Text Document";
return Messages;
});

14
customize.dist/translations/messages.sv.js

@ -0,0 +1,14 @@
/*
* You can override the translation text using this file.
* The recommended method is to make a copy of this file (/customize.dist/translations/messages.{LANG}.js)
in a 'customize' directory (/customize/translations/messages.{LANG}.js).
* If you want to check all the existing translation keys, you can open the internal language file
but you should not change it directly (/common/translations/messages.{LANG}.js)
*/
define(['/common/translations/messages.sv.js'], function (Messages) {
// Replace the existing keys in your copied file here:
// Messages.button_newpad = "New Rich Text Document";
return Messages;
});

28
docs/cryptpad.service

@ -0,0 +1,28 @@
[Unit]
Description=CryptPad API server
[Service]
ExecStart=/home/cryptpad/.nvm/versions/node/v12.14.0/bin/node /home/cryptpad/cryptpad/server.js
# modify to match the location of your cryptpad repository
WorkingDirectory=/home/cryptpad/cryptpad
Restart=always
# Restart service after 10 seconds if node service crashes
RestartSec=2
# Output to syslog
StandardOutput=syslog
StandardError=syslog
SyslogIdentifier=cryptpad
User=cryptpad
Group=cryptpad
# modify to match your working directory
Environment='PWD="/home/cryptpad/cryptpad/cryptpad"'
# systemd sets the open file limit to 4000 unless you override it
# cryptpad stores its data with the filesystem, so you should increase this to match the value of `ulimit -n`
# or risk EMFILE errors.
LimitNOFILE=1000000
[Install]
WantedBy=multi-user.target

11
docs/example.nginx.conf

@ -54,6 +54,7 @@ server {
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
add_header X-XSS-Protection "1; mode=block";
add_header X-Content-Type-Options nosniff;
add_header Access-Control-Allow-Origin "*";
# add_header X-Frame-Options "SAMEORIGIN";
# Insert the path to your CryptPad repository root here
@ -72,7 +73,7 @@ server {
set $styleSrc "'unsafe-inline' 'self' ${main_domain}";
# connect-src restricts URLs which can be loaded using script interfaces
set $connectSrc "'self' https://${main_domain} $main_domain https://${api_domain} blob:";
set $connectSrc "'self' https://${main_domain} ${main_domain} https://${api_domain} blob: wss://${api_domain} ${api_domain} ${files_domain}";
# fonts can be loaded from data-URLs or the main domain
set $fontSrc "'self' data: ${main_domain}";
@ -96,21 +97,21 @@ server {
set $workerSrc "https://${main_domain}";
# script-src specifies valid sources for javascript, including inline handlers
set $scriptSrc "'self' ${main_domain}";
set $scriptSrc "'self' resource: ${main_domain}";
set $unsafe 0;
# the following assets are loaded via the sandbox domain
# they unfortunately still require exceptions to the sandboxing to work correctly.
if ($uri = "/pad/inner.html") { set $unsafe 1; }
if ($uri = "/sheet/inner.html") { set $unsafe 1; }
if ($uri = "/common/onlyoffice/web-apps/apps/spreadsheeteditor/main/index.html") { set $unsafe 1; }
if ($uri ~ ^\/common\/onlyoffice\/.*\/index\.html.*$) { set $unsafe 1; }
# everything except the sandbox domain is a privileged scope, as they might be used to handle keys
if ($host != sandbox.cryptpad.info) { set $unsafe 0; }
if ($host != $sandbox_domain) { set $unsafe 0; }
# privileged contexts allow a few more rights than unprivileged contexts, though limits are still applied
if ($unsafe) {
set $scriptSrc "'self' 'unsafe-eval' 'unsafe-inline' ${main_domain}";
set $scriptSrc "'self' 'unsafe-eval' 'unsafe-inline' resource: ${main_domain}";
}
# Finally, set all the rules you composed above.

1011
historyKeeper.js
File diff suppressed because it is too large

65
import

@ -1,65 +0,0 @@
#!/usr/bin/env node
/* globals process */
var Config = require("./config");
var Fs = require("fs");
var Storage = require(Config.storage);
var args = process.argv.slice(2);
if (!args.length) {
console.log("Insufficient arguments!");
console.log("Pass a path to a database backup!");
process.exit();
}
var dump = Fs.readFileSync(args[0], 'utf-8');
var ready = function (store) {
var lock = 0;
dump.split(/\n/)
.filter(function (line) {
return line;
})
.forEach(function (line, i) {
lock++;
var parts;
var channel;
var msg;
line.replace(/^(.*?)\|(.*)$/, function (all, c, m) {
channel = c;
msg = m;
return '';
});
if (!channel || !msg) {
console.log("BAD LINE on line %s", i);
return;
}
try {
JSON.parse(msg);
} catch (err) {
console.log("BAD LINE on line %s", i);
console.log(msg);
console.log();
}
store.message(channel, msg, function () {
console.log(line);
lock--;
if (!lock) {
console.log("DONE");
process.exit(0);
}
});
});
};
Storage.create(Config, function (store) {
console.log("READY");
ready(store);
});

35
lib/api.js

@ -0,0 +1,35 @@
/* jshint esversion: 6 */
const WebSocketServer = require('ws').Server;
const NetfluxSrv = require('chainpad-server');
module.exports.create = function (config) {
// asynchronously create a historyKeeper and RPC together
require('./historyKeeper.js').create(config, function (err, historyKeeper) {
if (err) { throw err; }
var log = config.log;
// spawn ws server and attach netflux event handlers
NetfluxSrv.create(new WebSocketServer({ server: config.httpServer}))
.on('channelClose', historyKeeper.channelClose)
.on('channelMessage', historyKeeper.channelMessage)
.on('channelOpen', historyKeeper.channelOpen)
.on('sessionClose', historyKeeper.sessionClose)
.on('error', function (error, label, info) {
if (!error) { return; }
if (['EPIPE', 'ECONNRESET'].indexOf(error && error.code) !== -1) { return; }
/* labels:
SEND_MESSAGE_FAIL, SEND_MESSAGE_FAIL_2, FAIL_TO_DISCONNECT,
FAIL_TO_TERMINATE, HANDLE_CHANNEL_LEAVE, NETFLUX_BAD_MESSAGE,
NETFLUX_WEBSOCKET_ERROR, NF_ENOENT
*/
log.error(label, {
code: error.code,
message: error.message,
stack: error.stack,
info: info,
});
})
.register(historyKeeper.id, historyKeeper.directMessage);
});
};
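A minimal sketch (not part of the diff) of how this module might be wired from a server entry point. The port, the bare HTTP handler and the logger stub are assumptions; the remaining storage options are the ones lib/historyKeeper.js documents further below.
const Http = require('http');
const Api = require('./lib/api');

// the real server serves the static client from this HTTP server as well
const httpServer = Http.createServer(function (req, res) { res.end(); });
httpServer.listen(3000, '127.0.0.1', function () {
    Api.create({
        httpServer: httpServer,
        log: { error: console.error, warn: console.warn, info: console.info,
               verbose: function () {}, silly: function () {} },
        // ...plus the storage paths and limits consumed by lib/historyKeeper.js
    });
});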

197
lib/commands/admin-rpc.js

@@ -0,0 +1,197 @@
/*jshint esversion: 6 */
/* globals process */
const nThen = require("nthen");
const getFolderSize = require("get-folder-size");
const Util = require("../common-util");
const Ulimit = require("ulimit");
var Fs = require("fs");
var Admin = module.exports;
var getFileDescriptorCount = function (Env, server, cb) {
Fs.readdir('/proc/self/fd', function(err, list) {
if (err) { return void cb(err); }
cb(void 0, list.length);
});
};
var getFileDescriptorLimit = function (env, server, cb) {
Ulimit(cb);
};
var getCacheStats = function (env, server, cb) {
var metaSize = 0;
var channelSize = 0;
var metaCount = 0;
var channelCount = 0;
try {
var meta = env.metadata_cache;
for (var x in meta) {
if (meta.hasOwnProperty(x)) {
metaCount++;
metaSize += JSON.stringify(meta[x]).length;
}
}
var channels = env.channel_cache;
for (var y in channels) {
if (channels.hasOwnProperty(y)) {
channelCount++;
channelSize += JSON.stringify(channels[y]).length;
}
}
} catch (err) {
return void cb(err && err.message);
}
cb(void 0, {
metadata: metaCount,
metaSize: metaSize,
channel: channelCount,
channelSize: channelSize,
memoryUsage: process.memoryUsage(),
});
};
var getActiveSessions = function (Env, Server, cb) {
var stats = Server.getSessionStats();
cb(void 0, [
stats.total,
stats.unique
]);
};
var shutdown = function (Env, Server, cb) {
if (true) {
return void cb('E_NOT_IMPLEMENTED');
}
// disconnect all users and reject new connections
Server.shutdown();
// stop all intervals that may be running
Object.keys(Env.intervals).forEach(function (name) {
clearInterval(Env.intervals[name]);
});
// set a flag to prevent incoming database writes
// wait until all pending writes are complete
// then process.exit(0);
// and allow system functionality to restart the server
};
var getRegisteredUsers = function (Env, Server, cb) {
Env.batchRegisteredUsers('', cb, function (done) {
var dir = Env.paths.pin;
var folders;
var users = 0;
nThen(function (waitFor) {
Fs.readdir(dir, waitFor(function (err, list) {
if (err) {
waitFor.abort();
return void done(err);
}
folders = list;
}));
}).nThen(function (waitFor) {
folders.forEach(function (f) {
var dir = Env.paths.pin + '/' + f;
Fs.readdir(dir, waitFor(function (err, list) {
if (err) { return; }
users += list.length;
}));
});
}).nThen(function () {
done(void 0, users);
});
});
};
var getDiskUsage = function (Env, Server, cb) {
Env.batchDiskUsage('', cb, function (done) {
var data = {};
nThen(function (waitFor) {
getFolderSize('./', waitFor(function(err, info) {
data.total = info;
}));
getFolderSize(Env.paths.pin, waitFor(function(err, info) {
data.pin = info;
}));
getFolderSize(Env.paths.blob, waitFor(function(err, info) {
data.blob = info;
}));
getFolderSize(Env.paths.staging, waitFor(function(err, info) {
data.blobstage = info;
}));
getFolderSize(Env.paths.block, waitFor(function(err, info) {
data.block = info;
}));
getFolderSize(Env.paths.data, waitFor(function(err, info) {
data.datastore = info;
}));
}).nThen(function () {
done(void 0, data);
});
});
};
var getActiveChannelCount = function (Env, Server, cb) {
cb(void 0, Server.getActiveChannelCount());
};
var flushCache = function (Env, Server, cb) {
Env.flushCache();
cb(void 0, true);
};
// CryptPad_AsyncStore.rpc.send('ADMIN', ['SET_DEFAULT_STORAGE_LIMIT', 1024 * 1024 * 1024 /* 1GB */], console.log)
var setDefaultStorageLimit = function (Env, Server, cb, data) {
var value = Array.isArray(data) && data[1];
if (typeof(value) !== 'number' || value <= 0) { return void cb('EINVAL'); }
var previous = Env.defaultStorageLimit;
var change = {
previous: previous,
current: value,
};
Env.defaultStorageLimit = value;
Env.Log.info('DEFAULT_STORAGE_LIMIT_UPDATE', change);
cb(void 0, change);
};
var commands = {
ACTIVE_SESSIONS: getActiveSessions,
ACTIVE_PADS: getActiveChannelCount,
REGISTERED_USERS: getRegisteredUsers,
DISK_USAGE: getDiskUsage,
FLUSH_CACHE: flushCache,
SHUTDOWN: shutdown,
GET_FILE_DESCRIPTOR_COUNT: getFileDescriptorCount,
GET_FILE_DESCRIPTOR_LIMIT: getFileDescriptorLimit,
SET_DEFAULT_STORAGE_LIMIT: setDefaultStorageLimit,
GET_CACHE_STATS: getCacheStats,
};
Admin.command = function (Env, safeKey, data, _cb, Server) {
var cb = Util.once(Util.mkAsync(_cb));
var admins = Env.admins;
//var unsafeKey = Util.unescapeKeyCharacters(safeKey);
if (admins.indexOf(safeKey) === -1) {
return void cb("FORBIDDEN");
}
var command = commands[data[0]];
if (typeof(command) === 'function') {
return void command(Env, Server, cb, data);
}
return void cb('UNHANDLED_ADMIN_COMMAND');
};
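A sketch (not part of the diff) of driving the dispatcher directly. The fake Env below contains just enough state for GET_CACHE_STATS, and the module path is assumed to be relative to the repository root.
const Admin = require('./lib/commands/admin-rpc');

// just enough environment for GET_CACHE_STATS; real calls receive the full Env
var Env = {
    admins: ['ADMIN_SAFE_KEY'],
    metadata_cache: { chan1: { channel: 'chan1' } },
    channel_cache: { chan1: {} },
};

Admin.command(Env, 'ADMIN_SAFE_KEY', ['GET_CACHE_STATS'], function (err, stats) {
    if (err) { return void console.error(err); } // 'FORBIDDEN' unless the key is in Env.admins
    console.log(stats); // { metadata, metaSize, channel, channelSize, memoryUsage }
}, /* Server (unused by this command) */ null);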

172
lib/commands/block.js

@@ -0,0 +1,172 @@
/*jshint esversion: 6 */
/* globals Buffer*/
var Block = module.exports;
const Fs = require("fs");
const Fse = require("fs-extra");
const Path = require("path");
const Nacl = require("tweetnacl/nacl-fast");
const nThen = require("nthen");
const Util = require("../common-util");
/*
We assume that the server is secured against MitM attacks
via HTTPS, and that malicious actors do not have code execution
capabilities. If they do, we have much more serious problems.
The capability to replay a block write or remove results in either
a denial of service for the user whose block was removed, or in the
case of a write, a rollback to an earlier password.
Since block modification is destructive, this can result in loss
of access to the user's drive.
So long as the detached signature is never observed by a malicious
party, and the server discards it after proof of knowledge, replays
are not possible. However, this precludes verification of the signature
at a later time.
Despite this, an integrity check is still possible by the original
author of the block, since we assume that the block will have been
encrypted with xsalsa20-poly1305 which is authenticated.
*/
var validateLoginBlock = function (Env, publicKey, signature, block, cb) { // FIXME BLOCKS
// convert the public key to a Uint8Array and validate it
if (typeof(publicKey) !== 'string') { return void cb('E_INVALID_KEY'); }
var u8_public_key;
try {
u8_public_key = Nacl.util.decodeBase64(publicKey);
} catch (e) {
return void cb('E_INVALID_KEY');
}
var u8_signature;
try {
u8_signature = Nacl.util.decodeBase64(signature);
} catch (e) {
Env.Log.error('INVALID_BLOCK_SIGNATURE', e);
return void cb('E_INVALID_SIGNATURE');
}
// convert the block to a Uint8Array
var u8_block;
try {
u8_block = Nacl.util.decodeBase64(block);
} catch (e) {
return void cb('E_INVALID_BLOCK');
}
// take its hash
var hash = Nacl.hash(u8_block);
// validate the signature against the hash of the content
var verified = Nacl.sign.detached.verify(hash, u8_signature, u8_public_key);
// existing authentication ensures that users cannot replay old blocks
// call back with (err) if unsuccessful
if (!verified) { return void cb("E_COULD_NOT_VERIFY"); }
return void cb(null, u8_block);
};
var createLoginBlockPath = function (Env, publicKey) { // FIXME BLOCKS
// prepare publicKey to be used as a file name
var safeKey = Util.escapeKeyCharacters(publicKey);
// validate safeKey
if (typeof(safeKey) !== 'string') {
return;
}
// derive the full path
// /home/cryptpad/cryptpad/block/fg/fg32kefksjdgjkewrjksdfksjdfsdfskdjfsfd
return Path.join(Env.paths.block, safeKey.slice(0, 2), safeKey);
};
Block.writeLoginBlock = function (Env, safeKey, msg, cb) { // FIXME BLOCKS
//console.log(msg);
var publicKey = msg[0];
var signature = msg[1];
var block = msg[2];
validateLoginBlock(Env, publicKey, signature, block, function (e, validatedBlock) {
if (e) { return void cb(e); }
if (!(validatedBlock instanceof Uint8Array)) { return void cb('E_INVALID_BLOCK'); }
// derive the filepath
var path = createLoginBlockPath(Env, publicKey);
// make sure the path is valid
if (typeof(path) !== 'string') {
return void cb('E_INVALID_BLOCK_PATH');
}
var parsed = Path.parse(path);
if (!parsed || typeof(parsed.dir) !== 'string') {
return void cb("E_INVALID_BLOCK_PATH_2");
}
nThen(function (w) {
// make sure the path to the file exists
Fse.mkdirp(parsed.dir, w(function (e) {
if (e) {
w.abort();
cb(e);
}
}));
}).nThen(function () {
// actually write the block
// flow is dumb and I need to guard against this which will never happen
/*:: if (typeof(validatedBlock) === 'undefined') { throw new Error('should never happen'); } */
/*:: if (typeof(path) === 'undefined') { throw new Error('should never happen'); } */
Fs.writeFile(path, Buffer.from(validatedBlock), { encoding: "binary", }, function (err) {
if (err) { return void cb(err); }
cb();
});
});
});
};
/*
When users write a block, they upload the block, and provide
a signature proving that they deserve to be able to write to
the location determined by the public key.
When removing a block, there is nothing to upload, but we need
to sign something. Since the signature is considered sensitive
information, we can just sign some constant and use that as proof.
*/
Block.removeLoginBlock = function (Env, safeKey, msg, cb) { // FIXME BLOCKS
var publicKey = msg[0];
var signature = msg[1];
var block = Nacl.util.decodeUTF8('DELETE_BLOCK'); // clients and the server will have to agree on this constant
validateLoginBlock(Env, publicKey, signature, block, function (e /*::, validatedBlock */) {
if (e) { return void cb(e); }
// derive the filepath
var path = createLoginBlockPath(Env, publicKey);
// make sure the path is valid
if (typeof(path) !== 'string') {
return void cb('E_INVALID_BLOCK_PATH');
}
// FIXME COLDSTORAGE
Fs.unlink(path, function (err) {
Env.Log.info('DELETION_BLOCK_BY_OWNER_RPC', {
publicKey: publicKey,
path: path,
status: err? String(err): 'SUCCESS',
});
if (err) { return void cb(err); }
cb();
});
});
};
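For reference (not part of the diff), a client-side counterpart sketched under the same assumptions as validateLoginBlock above: a detached signature over the SHA-512 hash of the encrypted block, everything base64-encoded. The keypair here is random (in CryptPad it is derived from the user's credentials), and the sketch assumes the same tweetnacl build the server code uses, one that still exposes Nacl.util.
const Nacl = require('tweetnacl/nacl-fast');

var keys = Nacl.sign.keyPair();
var blockBytes = Nacl.util.decodeUTF8('ciphertext would go here'); // normally xsalsa20-poly1305 output

// [publicKey, signature, block] in the shape Block.writeLoginBlock expects
var msg = [
    Nacl.util.encodeBase64(keys.publicKey),
    Nacl.util.encodeBase64(Nacl.sign.detached(Nacl.hash(blockBytes), keys.secretKey)),
    Nacl.util.encodeBase64(blockBytes),
];
console.log(msg);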

275
lib/commands/channel.js

@@ -0,0 +1,275 @@
/*jshint esversion: 6 */
const Channel = module.exports;
const Util = require("../common-util");
const nThen = require("nthen");
const Core = require("./core");
const Metadata = require("./metadata");
const HK = require("../hk-util");
Channel.clearOwnedChannel = function (Env, safeKey, channelId, cb, Server) {
if (typeof(channelId) !== 'string' || channelId.length !== 32) {
return cb('INVALID_ARGUMENTS');
}
var unsafeKey = Util.unescapeKeyCharacters(safeKey);
Metadata.getMetadata(Env, channelId, function (err, metadata) {
if (err) { return void cb(err); }
if (!Core.hasOwners(metadata)) { return void cb('E_NO_OWNERS'); }
// Confirm that the channel is owned by the user in question
if (!Core.isOwner(metadata, unsafeKey)) {
return void cb('INSUFFICIENT_PERMISSIONS');
}
return void Env.msgStore.clearChannel(channelId, function (e) {
if (e) { return void cb(e); }
cb();
const channel_cache = Env.channel_cache;
const clear = function () {
// delete the channel cache because it will have been invalidated
delete channel_cache[channelId];
};
nThen(function (w) {
Server.getChannelUserList(channelId).forEach(function (userId) {
Server.send(userId, [
0,
Env.historyKeeper.id,
'MSG',
userId,
JSON.stringify({
error: 'ECLEARED',
channel: channelId
})
], w());
});
}).nThen(function () {
clear();
}).orTimeout(function () {
Env.Log.warn("ON_CHANNEL_CLEARED_TIMEOUT", channelId);
clear();
}, 30000);
});
});
};
var archiveOwnedChannel = function (Env, safeKey, channelId, cb, Server) {
var unsafeKey = Util.unescapeKeyCharacters(safeKey);
Metadata.getMetadata(Env, channelId, function (err, metadata) {
if (err) { return void cb(err); }
if (!Core.hasOwners(metadata)) { return void cb('E_NO_OWNERS'); }
if (!Core.isOwner(metadata, unsafeKey)) {
return void cb('INSUFFICIENT_PERMISSIONS');
}
// temporarily archive the file
return void Env.msgStore.archiveChannel(channelId, function (e) {
Env.Log.info('ARCHIVAL_CHANNEL_BY_OWNER_RPC', {
unsafeKey: unsafeKey,
channelId: channelId,
status: e? String(e): 'SUCCESS',
});
if (e) {
return void cb(e);
}
cb(void 0, 'OK');
const channel_cache = Env.channel_cache;
const metadata_cache = Env.metadata_cache;
const clear = function () {
delete channel_cache[channelId];
Server.clearChannel(channelId);
delete metadata_cache[channelId];
};
// an owner of a channel deleted it
nThen(function (w) {
// close the channel in the store
Env.msgStore.closeChannel(channelId, w());
}).nThen(function (w) {
// Server.channelBroadcast would be better
// but we can't trust it to track even one callback,
// let alone many in parallel.
// so we simulate it on this side to avoid race conditions
Server.getChannelUserList(channelId).forEach(function (userId) {
Server.send(userId, [
0,
Env.historyKeeper.id,
"MSG",
userId,
JSON.stringify({
error: 'EDELETED',
channel: channelId,
})
], w());
});
}).nThen(function () {
// clear the channel's data from memory
// once you've sent everyone a notice that the channel has been deleted
clear();
}).orTimeout(function () {
Env.Log.warn('ON_CHANNEL_DELETED_TIMEOUT', channelId);
clear();
}, 30000);
});
});
};
Channel.removeOwnedChannel = function (Env, safeKey, channelId, __cb, Server) {
var _cb = Util.once(Util.mkAsync(__cb));
if (typeof(channelId) !== 'string' || !Core.isValidId(channelId)) {
return _cb('INVALID_ARGUMENTS');
}
// archiving large channels or files can be expensive, so do it one at a time
// for any given user to ensure that nobody can use too much of the server's resources
Env.queueDeletes(safeKey, function (next) {
var cb = Util.both(_cb, next);
if (Env.blobStore.isFileId(channelId)) {
return void Env.removeOwnedBlob(channelId, safeKey, cb);
}
archiveOwnedChannel(Env, safeKey, channelId, cb, Server);
});
};
Channel.trimHistory = function (Env, safeKey, data, cb) {
if (!(data && typeof(data.channel) === 'string' && typeof(data.hash) === 'string' && data.hash.length === 64)) {
return void cb('INVALID_ARGS');
}
var channelId = data.channel;
var unsafeKey = Util.unescapeKeyCharacters(safeKey);
var hash = data.hash;
nThen(function (w) {
Metadata.getMetadata(Env, channelId, w(function (err, metadata) {
if (err) { return void cb(err); }
if (!Core.hasOwners(metadata)) {
w.abort();
return void cb('E_NO_OWNERS');
}
if (!Core.isOwner(metadata, unsafeKey)) {
w.abort();
return void cb("INSUFFICIENT_PERMISSIONS");
}
// else fall through to the next block
}));
}).nThen(function () {
Env.msgStore.trimChannel(channelId, hash, function (err) {
if (err) { return void cb(err); }
// clear historyKeeper's cache for this channel
Env.historyKeeper.channelClose(channelId);
cb(void 0, 'OK');
delete Env.channel_cache[channelId];
delete Env.metadata_cache[channelId];
});
});
};
var ARRAY_LINE = /^\[/;
/* Files can contain metadata but not content
call back with true if the channel log has no content other than metadata
otherwise false
*/
Channel.isNewChannel = function (Env, channel, cb) {
if (!Core.isValidId(channel)) { return void cb('INVALID_CHAN'); }
if (channel.length !== 32) { return void cb('INVALID_CHAN'); }
// TODO replace with readMessagesBin
var done = false;
Env.msgStore.getMessages(channel, function (msg) {
if (done) { return; }
try {
if (typeof(msg) === 'string' && ARRAY_LINE.test(msg)) {
done = true;
return void cb(void 0, false);
}
} catch (e) {
Env.WARN('invalid message read from store', e);
}
}, function () {
if (done) { return; }
// no more messages...
cb(void 0, true);
});
};
/* writePrivateMessage
allows users to anonymously send a message to the channel
prevents their netflux-id from being stored in history
and from being broadcast to anyone that might currently be in the channel
Otherwise behaves the same as sending to a channel
*/
Channel.writePrivateMessage = function (Env, args, _cb, Server, netfluxId) {
var cb = Util.once(Util.mkAsync(_cb));
var channelId = args[0];
var msg = args[1];
// don't bother handling empty messages
if (!msg) { return void cb("INVALID_MESSAGE"); }
// don't support anything except regular channels
if (!Core.isValidId(channelId) || channelId.length !== 32) {
return void cb("INVALID_CHAN");
}
// We expect a modern netflux-websocket-server instance
// if this API isn't here everything will fall apart anyway
if (!(Server && typeof(Server.send) === 'function')) {
return void cb("NOT_IMPLEMENTED");
}
nThen(function (w) {
Metadata.getMetadataRaw(Env, channelId, w(function (err, metadata) {
if (err) {
w.abort();
Env.Log.error('HK_WRITE_PRIVATE_MESSAGE', err);
return void cb('METADATA_ERR');
}
if (!metadata || !metadata.restricted) {
return;
}
var session = HK.getNetfluxSession(Env, netfluxId);
var allowed = HK.listAllowedUsers(metadata);
if (HK.isUserSessionAllowed(allowed, session)) { return; }
w.abort();
cb('INSUFFICIENT_PERMISSIONS');
}));
}).nThen(function () {
// historyKeeper expects something with an 'id' attribute
// it will fail unless you provide it, but it doesn't need anything else
var channelStruct = {
id: channelId,
};
// construct a message to store and broadcast
var fullMessage = [
0, // protocol slot for the sender's sequence number (not meaningful for a server-constructed message)
null, // normally the netflux id, null isn't rejected, and it distinguishes messages written in this way
"MSG", // indicate that this is a MSG
channelId, // channel id
msg // the actual message content. Generally a string
];
// historyKeeper already knows how to handle metadata and message validation, so we just pass it off here
// if the message isn't valid it won't be stored.
Env.historyKeeper.channelMessage(Server, channelStruct, fullMessage);
Server.getChannelUserList(channelId).forEach(function (userId) {
Server.send(userId, fullMessage);
});
cb();
});
};
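A sketch (not part of the diff) of the call shapes for two of the commands above, wrapped in a function because Env and safeKey are whatever the RPC dispatcher normally supplies:
const Channel = require('./lib/commands/channel');

module.exports.example = function (Env, safeKey) {
    var channelId = new Array(33).join('a'); // any 32-character channel id
    Channel.isNewChannel(Env, channelId, function (err, isNew) {
        console.log(err || isNew); // true when the log holds nothing but metadata
    });
    Channel.trimHistory(Env, safeKey, {
        channel: channelId,
        hash: new Array(65).join('0'), // first 64 characters of the oldest message to keep
    }, function (err) {
        console.log(err || 'trimmed');
    });
};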

149
lib/commands/core.js

@@ -0,0 +1,149 @@
/*jshint esversion: 6 */
/* globals process */
const Core = module.exports;
const Util = require("../common-util");
const escapeKeyCharacters = Util.escapeKeyCharacters;
//const { fork } = require('child_process');
Core.DEFAULT_LIMIT = 50 * 1024 * 1024;
Core.SESSION_EXPIRATION_TIME = 60 * 1000;
Core.isValidId = function (chan) {
return chan && chan.length && /^[a-zA-Z0-9=+-]*$/.test(chan) &&
[32, 48].indexOf(chan.length) > -1;
};
var makeToken = Core.makeToken = function () {
return Number(Math.floor(Math.random() * Number.MAX_SAFE_INTEGER))
.toString(16);
};
Core.makeCookie = function (token) {
var time = (+new Date());
time -= time % 5000;
return [
time,
process.pid,
token
];
};
var parseCookie = function (cookie) {
if (!(cookie && cookie.split)) { return null; }
var parts = cookie.split('|');
if (parts.length !== 3) { return null; }
var c = {};
c.time = new Date(parts[0]);
c.pid = Number(parts[1]);
c.seq = parts[2];
return c;
};
Core.getSession = function (Sessions, key) {
var safeKey = escapeKeyCharacters(key);
if (Sessions[safeKey]) {
Sessions[safeKey].atime = +new Date();
return Sessions[safeKey];
}
var user = Sessions[safeKey] = {};
user.atime = +new Date();
user.tokens = [
makeToken()
];
return user;
};
Core.expireSession = function (Sessions, safeKey) {
var session = Sessions[safeKey];
if (!session) { return; }
if (session.blobstage) {
session.blobstage.close();
}
delete Sessions[safeKey];
};
Core.expireSessionAsync = function (Env, safeKey, cb) {
setTimeout(function () {
Core.expireSession(Env.Sessions, safeKey);
cb(void 0, 'OK');
});
};
var isTooOld = function (time, now) {
return (now - time) > 300000;
};
Core.expireSessions = function (Sessions) {
var now = +new Date();
Object.keys(Sessions).forEach(function (safeKey) {
var session = Sessions[safeKey];
if (session && isTooOld(session.atime, now)) {
Core.expireSession(Sessions, safeKey);
}
});
};
var addTokenForKey = function (Sessions, publicKey, token) {
if (!Sessions[publicKey]) { throw new Error('undefined user'); }
var user = Core.getSession(Sessions, publicKey);
user.tokens.push(token);
user.atime = +new Date();
if (user.tokens.length > 2) { user.tokens.shift(); }
};
Core.isValidCookie = function (Sessions, publicKey, cookie) {
var parsed = parseCookie(cookie);
if (!parsed) { return false; }
var now = +new Date();
if (!parsed.time) { return false; }
if (isTooOld(parsed.time, now)) {
return false;
}
// the cookie was issued by a different process, so it cannot match this one's tokens
if (process.pid !== parsed.pid) {
return false;
}
var user = Core.getSession(Sessions, publicKey);
if (!user) { return false; }
var idx = user.tokens.indexOf(parsed.seq);
if (idx === -1) { return false; }
if (idx > 0) {
// make a new token
addTokenForKey(Sessions, publicKey, Core.makeToken());
}
return true;
};
// E_NO_OWNERS
Core.hasOwners = function (metadata) {
return Boolean(metadata && Array.isArray(metadata.owners));
};
Core.hasPendingOwners = function (metadata) {
return Boolean(metadata && Array.isArray(metadata.pending_owners));
};
// INSUFFICIENT_PERMISSIONS
Core.isOwner = function (metadata, unsafeKey) {
return metadata.owners.indexOf(unsafeKey) !== -1;
};
Core.isPendingOwner = function (metadata, unsafeKey) {
return metadata.pending_owners.indexOf(unsafeKey) !== -1;
};
Core.haveACookie = function (Env, safeKey, cb) {
cb();
};
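A self-contained sketch (not part of the diff) of the cookie lifecycle implemented above; only the module path is an assumption.
const Core = require('./lib/commands/core');

var Sessions = {};
var publicKey = 'example-public-key';

// opening a session hands out a token...
var token = Core.getSession(Sessions, publicKey).tokens[0];

// ...which the RPC layer turns into a cookie: [time, pid, token] joined with '|'
var cookie = Core.makeCookie(token).join('|');

// later requests are accepted only if the cookie is recent, comes from this
// process, and carries a token that the session still remembers
console.log(Core.isValidCookie(Sessions, publicKey, cookie));   // true
console.log(Core.isValidCookie(Sessions, publicKey, '0|0|bogus')); // false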

189
lib/commands/metadata.js

@@ -0,0 +1,189 @@
/*jshint esversion: 6 */
const Data = module.exports;
const Meta = require("../metadata");
const WriteQueue = require("../write-queue");
const Core = require("./core");
const Util = require("../common-util");
const HK = require("../hk-util");
Data.getMetadataRaw = function (Env, channel /* channelName */, _cb) {
const cb = Util.once(Util.mkAsync(_cb));
if (!Core.isValidId(channel)) { return void cb('INVALID_CHAN'); }
if (channel.length !== HK.STANDARD_CHANNEL_LENGTH) { return cb("INVALID_CHAN_LENGTH"); }
var cached = Env.metadata_cache[channel];
if (HK.isMetadataMessage(cached)) {
return void cb(void 0, cached);
}
Env.batchMetadata(channel, cb, function (done) {
Env.computeMetadata(channel, done);
});
};
Data.getMetadata = function (Env, channel, cb, Server, netfluxId) {
Data.getMetadataRaw(Env, channel, function (err, metadata) {
if (err) { return void cb(err); }
if (!(metadata && metadata.restricted)) {
// if it's not restricted then just call back
return void cb(void 0, metadata);
}
const session = HK.getNetfluxSession(Env, netfluxId);
const allowed = HK.listAllowedUsers(metadata);
if (!HK.isUserSessionAllowed(allowed, session)) {
return void cb(void 0, {
restricted: metadata.restricted,
allowed: allowed,
rejected: true,
});
}
cb(void 0, metadata);
});
};
/* setMetadata
- write a new line to the metadata log if a valid command is provided
- data is an object: {
channel: channelId,
command: metadataCommand (string),
value: value
}
*/
var queueMetadata = WriteQueue();
Data.setMetadata = function (Env, safeKey, data, cb, Server) {
var unsafeKey = Util.unescapeKeyCharacters(safeKey);
var channel = data.channel;
var command = data.command;
if (!channel || !Core.isValidId(channel)) { return void cb ('INVALID_CHAN'); }
if (!command || typeof (command) !== 'string') { return void cb('INVALID_COMMAND'); }
if (Meta.commands.indexOf(command) === -1) { return void cb('UNSUPPORTED_COMMAND'); }
queueMetadata(channel, function (next) {
Data.getMetadataRaw(Env, channel, function (err, metadata) {
if (err) {
cb(err);
return void next();
}
if (!Core.hasOwners(metadata)) {
cb('E_NO_OWNERS');
return void next();
}
// if you are a pending owner and not an owner
// you can either ADD_OWNERS, or RM_PENDING_OWNERS
// and you should only be able to add yourself as an owner
// everything else should be rejected
// else if you are not an owner
// you should be rejected
// else write the command
// Confirm that the channel is owned by the user in question
// or the user is accepting a pending ownership offer
if (Core.hasPendingOwners(metadata) &&
Core.isPendingOwner(metadata, unsafeKey) &&
!Core.isOwner(metadata, unsafeKey)) {
// If you are a pending owner, make sure you can only add yourself as an owner
if ((command !== 'ADD_OWNERS' && command !== 'RM_PENDING_OWNERS')
|| !Array.isArray(data.value)
|| data.value.length !== 1
|| data.value[0] !== unsafeKey) {
cb('INSUFFICIENT_PERMISSIONS');
return void next();
}
// FIXME wacky fallthrough is hard to read
// we could pass this off to a writeMetadataCommand function
// and make the flow easier to follow
} else if (!Core.isOwner(metadata, unsafeKey)) {
cb('INSUFFICIENT_PERMISSIONS');
return void next();
}
// Add the new metadata line
var line = [command, data.value, +new Date()];
var changed = false;
try {
changed = Meta.handleCommand(metadata, line);
} catch (e) {
cb(e);
return void next();
}
// if your command is valid but it didn't result in any change to the metadata,
// call back now and don't write any "useless" line to the log
if (!changed) {
cb(void 0, metadata);
return void next();
}
Env.msgStore.writeMetadata(channel, JSON.stringify(line), function (e) {
if (e) {
cb(e);
return void next();
}
// send the message back to the person who changed it
// since we know they're allowed to see it
cb(void 0, metadata);
next();
const metadata_cache = Env.metadata_cache;
// update the cached metadata
metadata_cache[channel] = metadata;
// it's easy to check if the channel is restricted
const isRestricted = metadata.restricted;
// and these values will be used in any case
const s_metadata = JSON.stringify(metadata);
const hk_id = Env.historyKeeper.id;
if (!isRestricted) {
// pre-allow-list behaviour
// if it's not restricted, broadcast the new metadata to everyone
return void Server.channelBroadcast(channel, s_metadata, hk_id);
}
// otherwise derive the list of users (unsafeKeys) that are allowed to stay
const allowed = HK.listAllowedUsers(metadata);
// anyone who is not allowed will get the same error message
const s_error = JSON.stringify({
error: 'ERESTRICTED',
channel: channel,
});
// iterate over the channel's userlist
const toRemove = [];
Server.getChannelUserList(channel).forEach(function (userId) {
const session = HK.getNetfluxSession(Env, userId);
// if the user is allowed to remain, send them the metadata
if (HK.isUserSessionAllowed(allowed, session)) {
return void Server.send(userId, [
0,
hk_id,
'MSG',
userId,
s_metadata
], function () {});
}
// otherwise they are not in the list.
// send them an error and kick them out!
Server.send(userId, [
0,
hk_id,
'MSG',
userId,
s_error
], function () {});
});
Server.removeFromChannel(channel, toRemove);
});
});
});
};
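A sketch (not part of the diff) of the data object setMetadata expects. ADD_OWNERS is one of the commands referenced above; Env, safeKey and Server are assumed to come from the RPC layer.
const Data = require('./lib/commands/metadata');

module.exports.example = function (Env, safeKey, Server) {
    Data.setMetadata(Env, safeKey, {
        channel: new Array(33).join('b'),   // the pad's 32-character channel id
        command: 'ADD_OWNERS',              // must be one of Meta.commands
        value: ['someUnsafePublicKey'],     // command-specific payload
    }, function (err, metadata) {
        console.log(err || metadata);       // the updated metadata on success
    }, Server);
};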

298
lib/commands/pin-rpc.js

@@ -0,0 +1,298 @@
/*jshint esversion: 6 */
const Core = require("./core");
const Pinning = module.exports;
const Util = require("../common-util");
const nThen = require("nthen");
//const escapeKeyCharacters = Util.escapeKeyCharacters;
const unescapeKeyCharacters = Util.unescapeKeyCharacters;
var sumChannelSizes = function (sizes) {
return Object.keys(sizes).map(function (id) { return sizes[id]; })
.filter(function (x) {
// only allow positive numbers
return !(typeof(x) !== 'number' || x <= 0);
})
.reduce(function (a, b) { return a + b; }, 0);
};
// FIXME it's possible for this to respond before the server has had a chance
// to fetch the limits. Maybe we should respond with an error...
// or wait until we actually know the limits before responding
var getLimit = Pinning.getLimit = function (Env, safeKey, cb) {
var unsafeKey = unescapeKeyCharacters(safeKey);
var limit = Env.limits[unsafeKey];
var defaultLimit = typeof(Env.defaultStorageLimit) === 'number'?
Env.defaultStorageLimit: Core.DEFAULT_LIMIT;
var toSend = limit && typeof(limit.limit) === "number"?
[limit.limit, limit.plan, limit.note] : [defaultLimit, '', ''];
cb(void 0, toSend);
};
var getMultipleFileSize = function (Env, channels, cb) {
Env.getMultipleFileSize(channels, cb);
};
var loadUserPins = function (Env, safeKey, cb) {
var session = Core.getSession(Env.Sessions, safeKey);
if (session.channels) {
return cb(session.channels);
}
Env.batchUserPins(safeKey, cb, function (done) {
Env.getPinState(safeKey, function (err, value) {
if (!err) {
// only put this into the cache if it completes
session.channels = value;
}
done(value);
});
});
};
var truthyKeys = function (O) {
try {
return Object.keys(O).filter(function (k) {
return O[k];
});
} catch (err) {
return [];
}
};
var getChannelList = Pinning.getChannelList = function (Env, safeKey, _cb) {
var cb = Util.once(Util.mkAsync(_cb));
loadUserPins(Env, safeKey, function (pins) {
cb(truthyKeys(pins));
});
};
Pinning.getTotalSize = function (Env, safeKey, cb) {
var unsafeKey = unescapeKeyCharacters(safeKey);
var limit = Env.limits[unsafeKey];
// Get a common key if multiple users share the same quota, otherwise take the public key
var batchKey = (limit && Array.isArray(limit.users)) ? limit.users.join('') : safeKey;
Env.batchTotalSize(batchKey, cb, function (done) {
var channels = [];
var addUnique = function (channel) {
if (channels.indexOf(channel) !== -1) { return; }
channels.push(channel);
};
nThen(function (waitFor) {
// Get the channels list for our user account
getChannelList(Env, safeKey, waitFor(function (_channels) {
if (!_channels) {
waitFor.abort();
return done('INVALID_PIN_LIST');
}
_channels.forEach(addUnique);
}));
// Get the channels list for users sharing our quota
if (limit && Array.isArray(limit.users) && limit.users.length > 1) {
limit.users.forEach(function (key) {
if (key === unsafeKey) { return; } // Don't count ourselves twice
getChannelList(Env, key, waitFor(function (_channels) {
if (!_channels) { return; } // Broken user, don't count their quota
_channels.forEach(addUnique);
}));
});
}
}).nThen(function () {
Env.getTotalSize(channels, done);
});
});
};
/* Users should be able to clear their own pin log with an authenticated RPC
*/
Pinning.removePins = function (Env, safeKey, cb) {
Env.pinStore.removeChannel(safeKey, function (err) {
Env.Log.info('DELETION_PIN_BY_OWNER_RPC', {
safeKey: safeKey,
status: err? String(err): 'SUCCESS',
});
if (err) { return void cb(err); }
cb(void 0, 'OK');
});
};
Pinning.trimPins = function (Env, safeKey, cb) {
cb("NOT_IMPLEMENTED");
};
var getFreeSpace = Pinning.getFreeSpace = function (Env, safeKey, cb) {
getLimit(Env, safeKey, function (e, limit) {
if (e) { return void cb(e); }
Pinning.getTotalSize(Env, safeKey, function (e, size) {
if (typeof(size) === 'undefined') { return void cb(e); }
var rem = limit[0] - size;
if (typeof(rem) !== 'number') {
return void cb('invalid_response');
}
cb(void 0, rem);
});
});
};
var getHash = Pinning.getHash = function (Env, safeKey, cb) {
getChannelList(Env, safeKey, function (channels) {
Env.hashChannelList(channels, cb);
});
};
Pinning.pinChannel = function (Env, safeKey, channels, cb) {
if (!Array.isArray(channels)) {
return void cb('INVALID_PIN_LIST');
}
// get channel list ensures your session has a cached channel list
getChannelList(Env, safeKey, function (pinned) {
var session = Core.getSession(Env.Sessions, safeKey);
// only pin channels which are not already pinned
var toStore = channels.filter(function (channel) {
return pinned.indexOf(channel) === -1;
});
if (toStore.length === 0) {
return void getHash(Env, safeKey, cb);
}
getMultipleFileSize(Env, toStore, function (e, sizes) {
if (typeof(sizes) === 'undefined') { return void cb(e); }
var pinSize = sumChannelSizes(sizes);
getFreeSpace(Env, safeKey, function (e, free) {
if (typeof(free) === 'undefined') {
Env.WARN('getFreeSpace', e);
return void cb(e);
}
if (pinSize > free) { return void cb('E_OVER_LIMIT'); }
Env.pinStore.message(safeKey, JSON.stringify(['PIN', toStore, +new Date()]),
function (e) {
if (e) { return void cb(e); }
toStore.forEach(function (channel) {
session.channels[channel] = true;
});
getHash(Env, safeKey, cb);
});
});
});
});
};
Pinning.unpinChannel = function (Env, safeKey, channels, cb) {
if (!Array.isArray(channels)) {
// expected array
return void cb('INVALID_PIN_LIST');
}
getChannelList(Env, safeKey, function (pinned) {
var session = Core.getSession(Env.Sessions, safeKey);
// only unpin channels which are pinned
var toStore = channels.filter(function (channel) {
return pinned.indexOf(channel) !== -1;
});
if (toStore.length === 0) {
return void getHash(Env, safeKey, cb);
}
Env.pinStore.message(safeKey, JSON.stringify(['UNPIN', toStore, +new Date()]),
function (e) {
if (e) { return void cb(e); }
toStore.forEach(function (channel) {
delete session.channels[channel];
});
getHash(Env, safeKey, cb);
});
});
};
Pinning.resetUserPins = function (Env, safeKey, channelList, cb) {
if (!Array.isArray(channelList)) { return void cb('INVALID_PIN_LIST'); }
var session = Core.getSession(Env.Sessions, safeKey);
if (!channelList.length) {
return void getHash(Env, safeKey, function (e, hash) {
if (e) { return cb(e); }
cb(void 0, hash);
});
}
var pins = {};
getMultipleFileSize(Env, channelList, function (e, sizes) {
if (typeof(sizes) === 'undefined') { return void cb(e); }
var pinSize = sumChannelSizes(sizes);
getLimit(Env, safeKey, function (e, limit) {
if (e) {
Env.WARN('[RESET_ERR]', e);
return void cb(e);
}
/* we want to let people pin, even if they are over their limit,
but they should only be able to do this once.
This prevents data loss in the case that someone registers, but
does not have enough free space to pin their migrated data.
They will not be able to pin additional pads until they upgrade
or delete enough files to go back under their limit. */
if (pinSize > limit[0] && session.hasPinned) { return void(cb('E_OVER_LIMIT')); }
Env.pinStore.message(safeKey, JSON.stringify(['RESET', channelList, +new Date()]),
function (e) {
if (e) { return void cb(e); }
channelList.forEach(function (channel) {
pins[channel] = true;
});
var oldChannels;
if (session.channels && typeof(session.channels) === 'object') {
oldChannels = Object.keys(session.channels);
} else {
oldChannels = [];
}
// update in-memory cache IFF the reset was allowed.
session.channels = pins;
getHash(Env, safeKey, function (e, hash) {
cb(e, hash);
});
});
});
});
};
Pinning.getFileSize = function (Env, channel, cb) {
Env.getFileSize(channel, cb);
};
/* accepts a list, and returns a sublist of channel or file ids which seem
to have been deleted from the server (file size 0)
we might consider that we should only say a file is gone if fs.stat returns
ENOENT, but for now it's simplest to just rely on getFileSize...
*/
Pinning.getDeletedPads = function (Env, channels, cb) {
Env.getDeletedPads(channels, cb);
};
// FIXME this will be removed from the client
Pinning.isChannelPinned = function (Env, channel, cb) {
return void cb(void 0, true);
};
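For reference (not part of the diff): the three line types these commands append to a user's pin log, and a minimal reducer showing how they collapse into a pin set. The real reducer lives elsewhere in the server; this only illustrates the semantics.
var pinLog = [
    ['RESET', ['chanA', 'chanB'], 1579000000000],
    ['PIN',   ['chanC'],          1579000001000],
    ['UNPIN', ['chanA'],          1579000002000],
];
var pinned = {};
pinLog.forEach(function (line) {
    var cmd = line[0];
    if (cmd === 'RESET') { pinned = {}; }
    (line[1] || []).forEach(function (chan) {
        if (cmd === 'UNPIN') { delete pinned[chan]; return; }
        pinned[chan] = true;
    });
});
console.log(Object.keys(pinned)); // [ 'chanB', 'chanC' ]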

107
lib/commands/quota.js

@@ -0,0 +1,107 @@
/*jshint esversion: 6 */
/* globals Buffer*/
const Quota = module.exports;
const Util = require("../common-util");
const Package = require('../../package.json');
const Https = require("https");
Quota.applyCustomLimits = function (Env) {
var isLimit = function (o) {
var valid = o && typeof(o) === 'object' &&
typeof(o.limit) === 'number' &&
typeof(o.plan) === 'string' &&
typeof(o.note) === 'string';
return valid;
};
// read custom limits from the Environment (taken from config)
var customLimits = (function (custom) {
var limits = {};
Object.keys(custom).forEach(function (k) {
k.replace(/\/([^\/]+)$/, function (all, safeKey) {
var id = Util.unescapeKeyCharacters(safeKey || '');
limits[id] = custom[k];
return '';
});
});
return limits;
}(Env.customLimits || {}));
Object.keys(customLimits).forEach(function (k) {
if (!isLimit(customLimits[k])) { return; }
Env.limits[k] = customLimits[k];
});
};
Quota.updateCachedLimits = function (Env, cb) {
Quota.applyCustomLimits(Env);
if (Env.blockDailyCheck === true ||
(typeof(Env.blockDailyCheck) === 'undefined' && Env.adminEmail === false && Env.allowSubscriptions === false)) {
return void cb();
}
var body = JSON.stringify({
domain: Env.myDomain,
subdomain: Env.mySubdomain || null,
adminEmail: Env.adminEmail,
version: Package.version
});
var options = {
host: 'accounts.cryptpad.fr',
path: '/api/getauthorized',
method: 'POST',
headers: {
"Content-Type": "application/json",
"Content-Length": Buffer.byteLength(body)
}
};
var req = Https.request(options, function (response) {
if (!('' + response.statusCode).match(/^2\d\d$/)) {
return void cb('SERVER ERROR ' + response.statusCode);
}
var str = '';
response.on('data', function (chunk) {
str += chunk;
});
response.on('end', function () {
try {
var json = JSON.parse(str);
Env.limits = json;
Quota.applyCustomLimits(Env);
cb(void 0);
} catch (e) {
cb(e);
}
});
});
req.on('error', function (e) {
Quota.applyCustomLimits(Env);
if (!Env.myDomain) { return cb(); }
// only return an error if your server allows subscriptions
cb(e);
});
req.end(body);
};
// The limits object contains storage limits for all the public keys that have paid
// Each key maps to an object containing the 'limit' value and a 'note' explaining that limit
Quota.getUpdatedLimit = function (Env, safeKey, cb) { // FIXME BATCH?
Quota.updateCachedLimits(Env, function (err) {
if (err) { return void cb(err); }
var limit = Env.limits[safeKey];
if (limit && typeof(limit.limit) === 'number') {
return void cb(void 0, [limit.limit, limit.plan, limit.note]);
}
return void cb(void 0, [Env.defaultStorageLimit, '', '']);
});
};
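A sketch (not part of the diff) of the customLimits shape that applyCustomLimits consumes. The key and values below are placeholders; only the last path segment of each key (the public key) is used.
const Quota = require('./lib/commands/quota');

var Env = {
    limits: {},
    customLimits: {
        'https://my.awesome.website/user/#/1/cryptpad-user1/EXAMPLEPUBLICKEY=': {
            limit: 5 * 1024 * 1024 * 1024, // bytes
            plan: 'custom',
            note: 'storage donated by the admin',
        },
    },
};
Quota.applyCustomLimits(Env);
console.log(Env.limits); // keyed by the unescaped public key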

89
lib/commands/upload.js

@@ -0,0 +1,89 @@
/*jshint esversion: 6 */
const Upload = module.exports;
const Util = require("../common-util");
const Pinning = require("./pin-rpc");
const nThen = require("nthen");
const Core = require("./core");
Upload.status = function (Env, safeKey, filesize, _cb) { // FIXME FILES
var cb = Util.once(Util.mkAsync(_cb));
// validate that the provided size is actually a positive number
if (typeof(filesize) !== 'number' ||
filesize < 0) { return void cb('E_INVALID_SIZE'); }
nThen(function (w) {
// if the proposed upload size is within the regular limit
// jump ahead to the next block
if (filesize <= Env.maxUploadSize) { return; }
// if larger uploads aren't explicitly enabled then reject them
if (typeof(Env.premiumUploadSize) !== 'number') {
w.abort();
return void cb('TOO_LARGE');
}
// otherwise go and retrieve info about the user's quota
Pinning.getLimit(Env, safeKey, w(function (err, limit) {
if (err) {
w.abort();
return void cb("E_BAD_LIMIT");
}
var plan = limit[1];
// see if they have a special plan, reject them if not
if (plan === '') {
w.abort();
return void cb('TOO_LARGE');
}
// and that they're not over the greater limit
if (filesize >= Env.premiumUploadSize) {
w.abort();
return void cb("TOO_LARGE");
}
// fallthrough will proceed to the next block
}));
}).nThen(function (w) {
var abortAndCB = Util.both(w.abort, cb);
Env.blobStore.status(safeKey, w(function (err, inProgress) {
// if there's an error something is weird
if (err) { return void abortAndCB(err); }
// we cannot upload two things at once
if (inProgress) { return void abortAndCB(void 0, true); }
}));
}).nThen(function () {
// if you're here then there are no pending uploads
// check if you have space in your quota to upload something of this size
Pinning.getFreeSpace(Env, safeKey, function (e, free) {
if (e) { return void cb(e); }
if (filesize >= free) { return cb('NOT_ENOUGH_SPACE'); }
var user = Core.getSession(Env.Sessions, safeKey);
user.pendingUploadSize = filesize;
user.currentUploadSize = 0;
cb(void 0, false);
});
});
};
Upload.upload = function (Env, safeKey, chunk, cb) {
Env.blobStore.upload(safeKey, chunk, cb);
};
Upload.complete = function (Env, safeKey, arg, cb) {
Env.blobStore.complete(safeKey, arg, cb);
};
Upload.cancel = function (Env, safeKey, arg, cb) {
Env.blobStore.cancel(safeKey, arg, cb);
};
Upload.complete_owned = function (Env, safeKey, arg, cb) {
Env.blobStore.completeOwned(safeKey, arg, cb);
};
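A sketch (not part of the diff) of the order in which these calls are used for one upload. Env and safeKey come from the RPC layer, the 12345 size is arbitrary, and the exact meaning of 'arg' is delegated to the blob store and left as an assumption here.
const Upload = require('./lib/commands/upload');

module.exports.example = function (Env, safeKey, chunks, arg) {
    // 1. declare the total upload size; cb(void 0, true) means another upload is pending
    Upload.status(Env, safeKey, 12345, function (err, pending) {
        if (err || pending) { return void console.error(err || 'UPLOAD_IN_PROGRESS'); }
        // 2. stream the chunks through the blob store one after another...
        var next = function (i) {
            if (i >= chunks.length) {
                // 3. ...then finalize (or Upload.cancel / Upload.complete_owned as appropriate)
                return void Upload.complete(Env, safeKey, arg, console.log);
            }
            Upload.upload(Env, safeKey, chunks[i], function (e) {
                if (e) { return void console.error(e); }
                next(i + 1);
            });
        };
        next(0);
    });
};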

11
lib/deduplicate.js

@@ -1,11 +0,0 @@
// remove duplicate elements in an array
module.exports = function (O) {
// make a copy of the original array
var A = O.slice();
for (var i = 0; i < A.length; i++) {
for (var j = i + 1; j < A.length; j++) {
if (A[i] === A[j]) { A.splice(j--, 1); }
}
}
return A;
};
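Not part of the diff: the removed helper is equivalent to deduplicating with a Set, sketched below (whether that is what replaced it in the codebase is not shown here).
// remove duplicate elements in an array without mutating the input
var deduplicate = function (O) { return Array.from(new Set(O)); };
console.log(deduplicate(['a', 'b', 'a', 'c'])); // [ 'a', 'b', 'c' ]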

86
lib/defaults.js

@@ -0,0 +1,86 @@
var Default = module.exports;
Default.commonCSP = function (domain) {
domain = ' ' + domain;
// Content-Security-Policy
return [
"default-src 'none'",
"style-src 'unsafe-inline' 'self' " + domain,
"font-src 'self' data:" + domain,
/* child-src is used to restrict iframes to a set of allowed domains.
* connect-src is used to restrict what domains can connect to the websocket.
*
* it is recommended that you configure these fields to match the
* domain which will serve your CryptPad instance.
*/
"child-src blob: *",
// IE/Edge
"frame-src blob: *",
/* this allows connections over secure or insecure websockets
if you are deploying to production, you'll probably want to remove
the ws://* directive, and change '*' to your domain
*/
"connect-src 'self' ws: wss: blob:" + domain,
// data: is used by codemirror
"img-src 'self' data: blob:" + domain,
"media-src * blob:",
// for accounts.cryptpad.fr authentication and cross-domain iframe sandbox
"frame-ancestors *",
""
];
};
Default.contentSecurity = function (domain) {
return (Default.commonCSP(domain).join('; ') + "script-src 'self' resource: " + domain).replace(/\s+/g, ' ');
};
Default.padContentSecurity = function (domain) {
return (Default.commonCSP(domain).join('; ') + "script-src 'self' 'unsafe-eval' 'unsafe-inline' resource: " + domain).replace(/\s+/g, ' ');
};
Default.httpHeaders = function () {
return {
"X-XSS-Protection": "1; mode=block",
"X-Content-Type-Options": "nosniff",
"Access-Control-Allow-Origin": "*"
};
};
Default.mainPages = function () {
return [
'index',
'privacy',
'terms',
'about',
'contact',
'what-is-cryptpad',
'features',
'faq',
'maintenance'
];
};
/* By default the CryptPad server will run scheduled tasks every five minutes
* If you want to run scheduled tasks in a separate process (like a crontab)
* you can disable this behaviour by setting the following value to true
*/
//disableIntegratedTasks: false,
/* CryptPad's file storage adaptor closes unused files after a configurable
* number of milliseconds (default 30000 (30 seconds))
*/
// channelExpirationMs: 30000,
/* CryptPad's file storage adaptor is limited by the number of open files.
* When the adaptor reaches openFileLimit, it will clean up older files
*/
//openFileLimit: 2048,
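A self-contained sketch (not part of the diff) of using these defaults directly; the domain names are placeholders.
const Default = require('./lib/defaults');

// a ready-to-use Content-Security-Policy string for the main domain
console.log(Default.contentSecurity('cryptpad.example.com'));

// the sandboxed pad domain gets the more permissive variant
console.log(Default.padContentSecurity('sandbox.cryptpad.example.com'));

// generic headers applied to every response
var headers = Default.httpHeaders();
Object.keys(headers).forEach(function (k) {
    console.log(k + ': ' + headers[k]);
});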

285
lib/historyKeeper.js

@@ -0,0 +1,285 @@
/* jshint esversion: 6 */
const nThen = require('nthen');
const Crypto = require('crypto');
const WriteQueue = require("./write-queue");
const BatchRead = require("./batch-read");
const RPC = require("./rpc");
const HK = require("./hk-util.js");
const Core = require("./commands/core");
const Store = require("./storage/file");
const BlobStore = require("./storage/blob");
const Workers = require("./workers/index");
module.exports.create = function (config, cb) {
const Log = config.log;
var WARN = function (e, output) {
if (e && output) {
Log.warn(e, {
output: output,
message: String(e),
stack: new Error(e).stack,
});
}
};
Log.silly('HK_LOADING', 'LOADING HISTORY_KEEPER MODULE');
// TODO populate Env with everything that you use from config
// so that you can stop passing around your raw config
// and more easily share state between historyKeeper and rpc
const Env = {
Log: Log,
// store
id: Crypto.randomBytes(8).toString('hex'),
metadata_cache: {},
channel_cache: {},
queueStorage: WriteQueue(),
queueDeletes: WriteQueue(),
batchIndexReads: BatchRead("HK_GET_INDEX"),
batchMetadata: BatchRead('GET_METADATA'),
batchRegisteredUsers: BatchRead("GET_REGISTERED_USERS"),
batchDiskUsage: BatchRead('GET_DISK_USAGE'),
batchUserPins: BatchRead('LOAD_USER_PINS'),
batchTotalSize: BatchRead('GET_TOTAL_SIZE'),
//historyKeeper: config.historyKeeper,
intervals: config.intervals || {},
maxUploadSize: config.maxUploadSize || (20 * 1024 * 1024),
premiumUploadSize: false, // overridden below...
Sessions: {},
paths: {},
//msgStore: config.store,
netfluxUsers: {},
pinStore: undefined,
pinnedPads: {},
pinsLoaded: false,
pendingPinInquiries: {},
pendingUnpins: {},
pinWorkers: 5,
limits: {},
admins: [],
WARN: WARN,
flushCache: config.flushCache,
adminEmail: config.adminEmail,
allowSubscriptions: config.allowSubscriptions === true,
blockDailyCheck: config.blockDailyCheck === true,
myDomain: config.myDomain,
mySubdomain: config.mySubdomain, // only exists for the accounts integration
customLimits: config.customLimits || {},
// FIXME this attribute isn't in the default conf
// but it is referenced in Quota
domain: config.domain
};
(function () {
var pes = config.premiumUploadSize;
if (!isNaN(pes) && pes >= Env.maxUploadSize) {
Env.premiumUploadSize = pes;
}
}());
var paths = Env.paths;
var keyOrDefaultString = function (key, def) {
return typeof(config[key]) === 'string'? config[key]: def;
};
var pinPath = paths.pin = keyOrDefaultString('pinPath', './pins');
paths.block = keyOrDefaultString('blockPath', './block');
paths.data = keyOrDefaultString('filePath', './datastore');
paths.staging = keyOrDefaultString('blobStagingPath', './blobstage');
paths.blob = keyOrDefaultString('blobPath', './blob');
Env.defaultStorageLimit = typeof(config.defaultStorageLimit) === 'number' && config.defaultStorageLimit >= 0?
config.defaultStorageLimit:
Core.DEFAULT_LIMIT;
try {
Env.admins = (config.adminKeys || []).map(function (k) {
k = k.replace(/\/+$/, '');
var s = k.split('/');
return s[s.length-1];
});
} catch (e) {
console.error("Can't parse admin keys. Please update or fix your config.js file!");
}
config.historyKeeper = Env.historyKeeper = {
metadata_cache: Env.metadata_cache,
channel_cache: Env.channel_cache,
id: Env.id,
channelMessage: function (Server, channel, msgStruct) {
// netflux-server emits 'channelMessage' events whenever someone broadcasts to a channel
// historyKeeper stores these messages if the channel id indicates that they are
// a channel type with permanent history
HK.onChannelMessage(Env, Server, channel, msgStruct);
},
channelClose: function (channelName) {
// netflux-server emits 'channelClose' events whenever everyone leaves a channel
// we drop cached metadata and indexes at the same time
HK.dropChannel(Env, channelName);
},
channelOpen: function (Server, channelName, userId, wait) {
Env.channel_cache[channelName] = Env.channel_cache[channelName] || {};
var sendHKJoinMessage = function () {
Server.send(userId, [
0,
Env.id,
'JOIN',
channelName
]);
};
// a little backwards compatibility in case you don't have the latest server
// allow lists won't work unless you update, though
if (typeof(wait) !== 'function') { return void sendHKJoinMessage(); }
var next = wait();
var cb = function (err, info) {
next(err, info, sendHKJoinMessage);
};
// only conventional channels can be restricted
if ((channelName || "").length !== HK.STANDARD_CHANNEL_LENGTH) {
return void cb();
}
// gets and caches the metadata...
HK.getMetadata(Env, channelName, function (err, metadata) {
if (err) {
Log.error('HK_METADATA_ERR', {
channel: channelName,
error: err,
});
}
if (!metadata || (metadata && !metadata.restricted)) {
// the channel doesn't have metadata, or it does and it's not restricted
// either way, let them join.
return void cb();
}
// this channel is restricted. verify that the user in question is in the allow list
// construct a definitive list (owners + allowed)
var allowed = HK.listAllowedUsers(metadata);
// and get the list of keys for which this user has already authenticated
var session = HK.getNetfluxSession(Env, userId);
if (HK.isUserSessionAllowed(allowed, session)) {
return void cb();
}
// otherwise they're not allowed.
// respond with a special error that includes the list of keys
// which would be allowed...
// FIXME RESTRICT bonus points if you hash the keys to limit data exposure
cb("ERESTRICTED", allowed);
});
},
sessionClose: function (userId, reason) {
HK.closeNetfluxSession(Env, userId);
if (['BAD_MESSAGE', 'SEND_MESSAGE_FAIL_2'].indexOf(reason) !== -1) {
if (reason && reason.code === 'ECONNRESET') { return; }
return void Log.error('SESSION_CLOSE_WITH_ERROR', {
userId: userId,
reason: reason,
});
}
if (['SOCKET_CLOSED', 'SOCKET_ERROR'].indexOf(reason) !== -1) { return; }
Log.verbose('SESSION_CLOSE_ROUTINE', {
userId: userId,
reason: reason,
});
},
directMessage: function (Server, seq, userId, json) {
// netflux-server allows you to register an id with a handler
// this handler is invoked every time someone sends a message to that id
HK.onDirectMessage(Env, Server, seq, userId, json);
},
};
Log.verbose('HK_ID', 'History keeper ID: ' + Env.id);
nThen(function (w) {
// create a pin store
Store.create({
filePath: pinPath,
}, w(function (err, s) {
if (err) { throw err; }
Env.pinStore = s;
}));
// create a channel store
Store.create(config, w(function (err, _store) {
if (err) { throw err; }
config.store = _store;
Env.msgStore = _store; // API used by rpc
Env.store = _store; // API used by historyKeeper
}));
// create a blob store
BlobStore.create({
blobPath: config.blobPath,
blobStagingPath: config.blobStagingPath,
archivePath: config.archivePath,
getSession: function (safeKey) {
return Core.getSession(Env.Sessions, safeKey);
},
}, w(function (err, blob) {
if (err) { throw new Error(err); }
Env.blobStore = blob;
}));
}).nThen(function (w) {
Workers.initialize(Env, {
blobPath: config.blobPath,
blobStagingPath: config.blobStagingPath,
taskPath: config.taskPath,
pinPath: pinPath,
filePath: config.filePath,
archivePath: config.archivePath,
channelExpirationMs: config.channelExpirationMs,
verbose: config.verbose,
openFileLimit: config.openFileLimit,
maxWorkers: config.maxWorkers,
}, w(function (err) {
if (err) {
throw new Error(err);
}
}));
}).nThen(function () {
if (config.disableIntegratedTasks) { return; }
config.intervals = config.intervals || {};
var tasks_running;
config.intervals.taskExpiration = setInterval(function () {
if (tasks_running) { return; }
tasks_running = true;
Env.runTasks(function (err) {
if (err) {
Log.error('TASK_RUNNER_ERR', err);
}
tasks_running = false;
});
}, 1000 * 60 * 5); // run every five minutes
}).nThen(function () {
RPC.create(Env, function (err, _rpc) {
if (err) { throw err; }
Env.rpc = _rpc;
cb(void 0, config.historyKeeper);
});
});
};
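For reference (not part of the diff): the config fields read by the code above, with the same fallbacks that keyOrDefaultString and the constructor apply. The exact values are assumptions to adapt per instance.
// a sketch of the config consumed by module.exports.create above
const config = {
    log: { error: console.error, warn: console.warn, info: console.info,
           verbose: function () {}, silly: function () {} },
    // storage paths (the defaults shown above)
    filePath: './datastore',
    pinPath: './pins',
    blobPath: './blob',
    blobStagingPath: './blobstage',
    blockPath: './block',
    archivePath: './archive',   // referenced by the blob store and workers above
    taskPath: './tasks',        // referenced by the workers above
    // limits and admin keys
    maxUploadSize: 20 * 1024 * 1024,
    defaultStorageLimit: 50 * 1024 * 1024,
    adminKeys: [],
    customLimits: {},
    // scheduled task runner (see the interval above)
    disableIntegratedTasks: false,
};
// require('./lib/historyKeeper').create(config, function (err, hk) { /* ... */ });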

910
lib/hk-util.js

@@ -0,0 +1,910 @@
/* jshint esversion: 6 */
/* global Buffer */
var HK = module.exports;
const nThen = require('nthen');
const Util = require("./common-util");
const MetaRPC = require("./commands/metadata");
const Nacl = require('tweetnacl/nacl-fast');
const now = function () { return (new Date()).getTime(); };
const ONE_DAY = 1000 * 60 * 60 * 24; // one day in milliseconds
/* getHash
* this function slices off the leading portion of a message which is
most likely unique
* these "hashes" are used to identify particular messages in a channel's history
* clients store "hashes" either in memory or in their drive to query for new messages:
* when reconnecting to a pad
* when connecting to chat or a mailbox
* thus, we can't change this function without invalidating client data which:
* is encrypted clientside
* can't be easily migrated
* don't break it!
*/
const getHash = HK.getHash = function (msg, Log) {
if (typeof(msg) !== 'string') {
if (Log) {
Log.warn('HK_GET_HASH', 'getHash() called on ' + typeof(msg) + ': ' + msg);
}
return '';
}
return msg.slice(0,64);
};
// historyKeeper should explicitly store any channel
// with a 32 character id
const STANDARD_CHANNEL_LENGTH = HK.STANDARD_CHANNEL_LENGTH = 32;
// historyKeeper should not store messages sent to any channel
// with a 34 character id
const EPHEMERAL_CHANNEL_LENGTH = HK.EPHEMERAL_CHANNEL_LENGTH = 34;
const tryParse = HK.tryParse = function (Env, str) {
try {
return JSON.parse(str);
} catch (err) {
Env.Log.error('HK_PARSE_ERROR', {
message: err && err.name,
input: str,
});
}
};
/* sliceCpIndex
returns a list of all checkpoints which might be relevant for a client connecting to a session
* if there are two or fewer checkpoints, return everything you have
* if there are more than two
* return at least two
* plus any more which were received within the last 100 messages
This is important because the additional history is what prevents
clients from forking on checkpoints and dropping forked history.
*/
const sliceCpIndex = HK.sliceCpIndex = function (cpIndex, line) {
// Remove "old" checkpoints (cp sent before 100 messages ago)
const minLine = Math.max(0, (line - 100));
let start = cpIndex.slice(0, -2);
const end = cpIndex.slice(-2);
start = start.filter(function (obj) {
return obj.line > minLine;
});
return start.concat(end);
};
const isMetadataMessage = HK.isMetadataMessage = function (parsed) {
return Boolean(parsed && parsed.channel);
};
HK.listAllowedUsers = function (metadata) {
return (metadata.owners || []).concat((metadata.allowed || []));
};
HK.getNetfluxSession = function (Env, netfluxId) {
return Env.netfluxUsers[netfluxId];
};
HK.isUserSessionAllowed = function (allowed, session) {
if (!session) { return false; }
for (var unsafeKey in session) {
if (allowed.indexOf(unsafeKey) !== -1) {
return true;
}
}
return false;
};
HK.authenticateNetfluxSession = function (Env, netfluxId, unsafeKey) {
var user = Env.netfluxUsers[netfluxId] = Env.netfluxUsers[netfluxId] || {};
user[unsafeKey] = +new Date();
};
HK.closeNetfluxSession = function (Env, netfluxId) {
delete Env.netfluxUsers[netfluxId];
};
// validateKeyStrings supplied by clients must decode to 32-byte Uint8Arrays
const isValidValidateKeyString = function (key) {
try {
return typeof(key) === 'string' &&
Nacl.util.decodeBase64(key).length === Nacl.sign.publicKeyLength;
} catch (e) {
return false;
}
};
var CHECKPOINT_PATTERN = /^cp\|(([A-Za-z0-9+\/=]+)\|)?/;
/* expireChannel is here to clean up channels that should have been removed
but for some reason are still present
*/
const expireChannel = function (Env, channel) {
return void Env.store.archiveChannel(channel, function (err) {
Env.Log.info("ARCHIVAL_CHANNEL_BY_HISTORY_KEEPER_EXPIRATION", {
channelId: channel,
status: err? String(err): "SUCCESS",
});
});
};
/* dropChannel
* cleans up memory structures which are managed entirely by the historyKeeper
*/
const dropChannel = HK.dropChannel = function (Env, chanName) {
delete Env.metadata_cache[chanName];
delete Env.channel_cache[chanName];
};
/* checkExpired
* synchronously returns true or undefined to indicate whether the channel is expired
* according to its metadata
* has some side effects:
* closes the channel via the store.closeChannel API
* and then broadcasts to all channel members that the channel has expired
* removes the channel from the netflux-server's in-memory cache
* removes the channel metadata from history keeper's in-memory cache
FIXME the boolean nature of this API should be separated from its side effects
*/
const checkExpired = function (Env, Server, channel) {
const store = Env.store;
const metadata_cache = Env.metadata_cache;
if (!(channel && channel.length === STANDARD_CHANNEL_LENGTH)) { return false; }
let metadata = metadata_cache[channel];
if (!(metadata && typeof(metadata.expire) === 'number')) { return false; }
// the number of milliseconds ago the channel should have expired
let pastDue = (+new Date()) - metadata.expire;
// less than zero means that it hasn't expired yet
if (pastDue < 0) { return false; }
// if it should have expired more than a day ago...
// there may have been a problem with scheduling tasks
// or the scheduled tasks may not be running
// so trigger a removal from here
if (pastDue >= ONE_DAY) { expireChannel(Env, channel); }
// close the channel
store.closeChannel(channel, function () {
Server.channelBroadcast(channel, {
error: 'EEXPIRED',
channel: channel
}, Env.id);
dropChannel(Env, channel);
});
// return true to indicate that it has expired
return true;
};
const getMetadata = HK.getMetadata = function (Env, channelName, _cb) {
var cb = Util.once(Util.mkAsync(_cb));
var metadata = Env.metadata_cache[channelName];
if (metadata && typeof(metadata) === 'object') {
return void cb(undefined, metadata);
}
MetaRPC.getMetadataRaw(Env, channelName, function (err, metadata) {
if (err) {
console.error(err);
return void cb(err);
}
if (!(metadata && typeof(metadata.channel) === 'string' && metadata.channel.length === STANDARD_CHANNEL_LENGTH)) {
return cb();
}
// cache it
Env.metadata_cache[channelName] = metadata;
cb(undefined, metadata);
});
};
/* getIndex
calls back with an error if anything goes wrong
or with a cached index for a channel if it exists
(along with metadata)
otherwise it calls back with the index computed by 'computeIndex'
as an added bonus:
if the channel exists but its index does not then it caches the index
*/
const getIndex = (Env, channelName, cb) => {
const channel_cache = Env.channel_cache;
const chan = channel_cache[channelName];
// if there is a channel in memory and it has an index cached, return it
if (chan && chan.index) {
// enforce async behaviour
return void Util.mkAsync(cb)(undefined, chan.index);
}
Env.batchIndexReads(channelName, cb, function (done) {
Env.computeIndex(Env, channelName, (err, ret) => {
// this is most likely an unrecoverable filesystem error
if (err) { return void done(err); }
// cache the computed result if possible
if (chan) { chan.index = ret; }
// return
done(void 0, ret);
});
});
};
/* checkOffsetMap
Sorry for the weird function --ansuz
This should be almost equivalent to `Object.keys(map).length` except
that it will use less memory by not allocating space for the temporary array.
Beyond that, it returns length * -1 if any of the members of the map
are not in ascending order. The function for removing older members of the map
loops over elements in order and deletes them, so ordering is important!
*/
var checkOffsetMap = function (map) {
var prev = 0;
var cur;
var ooo = 0; // out of order
var count = 0;
for (let k in map) {
count++;
cur = map[k];
if (!ooo && prev > cur) { ooo = true; }
prev = cur;
}
return ooo ? count * -1: count;
};
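/* Quick illustration of checkOffsetMap's contract (not part of the module):
    checkOffsetMap({ h1: 10, h2: 25, h3: 140 }); // => 3  (offsets are in ascending order)
    checkOffsetMap({ h1: 10, h2: 140, h3: 25 }); // => -3 (out of order: count * -1)
*/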
/* Pass the map and the number of elements it contains */
var trimOffsetByOrder = function (map, n) {
var toRemove = Math.max(n - 50, 0);
var i = 0;
for (let k in map) {
if (i >= toRemove) { return; }
i++;
delete map[k];
}
};
/* Remove from the map any byte offsets which are below
the lowest offset you'd like to preserve
(probably the oldest checkpoint). */
var trimMapByOffset = function (map, offset) {
if (!offset) { return; }
for (let k in map) {
if (map[k] < offset) {
delete map[k];
}
}
};
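/* Quick illustration of the two trimming helpers above (not part of the module):
    var map = { h1: 10, h2: 250, h3: 900 };
    trimMapByOffset(map, 250);  // drops entries below offset 250: { h2: 250, h3: 900 }
    trimOffsetByOrder(map, 2);  // keeps up to 50 entries, so nothing is removed here
*/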
/* storeMessage
* channel id
* the message to store
* whether the message is a checkpoint
* optionally the hash of the message
* it's not always used, but we guard against it
* async but doesn't have a callback
* source of a race condition whereby:
* two messages can be inserted
* two offsets can be computed using the total size of all the messages
* but the offsets don't correspond to the actual location of the newlines
* because the two actions were performed like ABba...
* the fix is to use callbacks and implement queueing for writes
* to guarantee that offset computation is always atomic with writes
*/
const storeMessage = function (Env, channel, msg, isCp, optionalMessageHash) {
const id = channel.id;
const Log = Env.Log;
Env.queueStorage(id, function (next) {
const msgBin = Buffer.from(msg + '\n', 'utf8');
// Store the message first, and update the index only once it's stored.
// store.messageBin can be async so updating the index first may
// result in a wrong cpIndex
nThen((waitFor) => {
Env.store.messageBin(id, msgBin, waitFor(function (err) {
if (err) {
waitFor.abort();
Log.error("HK_STORE_MESSAGE_ERROR", err.message);
// this error is critical, but there's not much we can do at the moment
// proceed with more messages, but they'll probably fail too
// at least you won't have a memory leak
// TODO make it possible to respond to clients with errors so they know
// their message wasn't stored
return void next();
}
}));
}).nThen((waitFor) => {
getIndex(Env, id, waitFor((err, index) => {
if (err) {
Log.warn("HK_STORE_MESSAGE_INDEX", err.stack);
// non-critical, we'll be able to get the channel index later
return void next();
}
if (typeof (index.line) === "number") { index.line++; }
if (isCp) {
index.cpIndex = sliceCpIndex(index.cpIndex, index.line || 0);
trimMapByOffset(index.offsetByHash, index.cpIndex[0]);
index.cpIndex.push({
offset: index.size,
line: ((index.line || 0) + 1)
});
}
if (optionalMessageHash) {
index.offsetByHash[optionalMessageHash] = index.size;
index.offsets++;
}
if (index.offsets >= 100 && !index.cpIndex.length) {
let offsetCount = checkOffsetMap(index.offsetByHash);
if (offsetCount < 0) {
Log.warn('OFFSET_TRIM_OOO', {
channel: id,
map: index.offsetByHash
});
} else if (offsetCount > 0) {
trimOffsetByOrder(index.offsetByHash, index.offsets);
index.offsets = checkOffsetMap(index.offsetByHash);
}
}
index.size += msgBin.length;
// handle the next element in the queue
next();
}));
});
});
};
/* getHistoryOffset
returns a number representing the byte offset from the start of the log
for whatever history you're seeking.
query by providing a 'lastKnownHash',
which is really just a string of the first 64 characters of an encrypted message.
OR by -1 which indicates that we want the full history (byte offset 0)
OR nothing, which indicates that you want whatever messages the historyKeeper deems relevant
(typically the last few checkpoints)
this function embeds a lot of the history keeper's logic:
0. if you passed -1 as the lastKnownHash it means you want the complete history
* I'm not sure why you'd need to call this function if you know it will return 0 in this case...
* it has a side-effect of filling the index cache if it's empty
1. if you provided a lastKnownHash and that message does not exist in the history:
* either the client has made a mistake or the history they knew about no longer exists
* call back with EUNKNOWN
2. if you did not provide a lastKnownHash
* and there are fewer than two checkpoints:
* return 0 (read from the start of the file)
* and there are two or more checkpoints:
* return the offset of the earliest checkpoint which 'sliceCpIndex' considers relevant
3. if you did provide a lastKnownHash
* read through the log until you find the hash that you're looking for
* call back with either the byte offset of the message that you found OR
* -1 if you didn't find it
*/
const getHistoryOffset = (Env, channelName, lastKnownHash, _cb) => {
const cb = Util.once(Util.mkAsync(_cb));
// lastKnownHash === -1 means we want the complete history
if (lastKnownHash === -1) { return void cb(null, 0); }
let offset = -1;
nThen((waitFor) => {
getIndex(Env, channelName, waitFor((err, index) => {
if (err) { waitFor.abort(); return void cb(err); }
// check if the "hash" the client is requesting exists in the index
const lkh = index.offsetByHash[lastKnownHash];
// fall through to the next block if the offset of the hash in question is not in memory
if (lastKnownHash && typeof(lkh) !== "number") { return; }
// No lastKnownHash: send history starting from the last two checkpoints
if (!lastKnownHash) {
waitFor.abort();
// Less than 2 checkpoints in the history: return everything
if (index.cpIndex.length < 2) { return void cb(null, 0); }
// Otherwise return the offset of the second-to-last checkpoint
return void cb(null, index.cpIndex[0].offset);
/* LATER...
in practice, two checkpoints can be very close together
we have measures to avoid duplicate checkpoints, but editors
can produce nearby checkpoints which are slightly different,
and slip past these protections. To be really careful, we can
seek past nearby checkpoints by some number of patches so as
to ensure that all editors have sufficient knowledge of history
to reconcile their differences. */
}
offset = lkh;
}));
}).nThen((w) => {
// skip past this block if the offset is anything other than -1
// this basically makes these first two nThen blocks behave like if-else
if (offset !== -1) { return; }
// either the message exists in history but is not in the cached index
// or it does not exist at all. In either case 'getHashOffset' is expected
* to return a number: -1 if not present, positive integer otherwise
Env.getHashOffset(channelName, lastKnownHash, w(function (err, _offset) {
if (err) {
w.abort();
return void cb(err);
}
offset = _offset;
}));
}).nThen(() => {
cb(null, offset);
});
};
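/* Illustrative sketch of getHistoryOffset's calling conventions (not part of the
   module; channelName and lastKnownHash are hypothetical values):
    getHistoryOffset(Env, channelName, -1, function (err, offset) {
        // offset === 0: the complete history was requested
    });
    getHistoryOffset(Env, channelName, undefined, function (err, offset) {
        // no lastKnownHash: the offset of the relevant checkpoint,
        // or 0 if there are fewer than two checkpoints
    });
    getHistoryOffset(Env, channelName, lastKnownHash, function (err, offset) {
        // the byte offset of the matching message, or -1 if it was not found
        // (getHistoryAsync treats -1 as EUNKNOWN)
    });
*/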
/* getHistoryAsync
* finds the appropriate byte offset from which to begin reading using 'getHistoryOffset'
* streams through the rest of the messages, safely parsing them and returning the parsed content to the handler
* calls back when it has reached the end of the log
Used by:
* GET_HISTORY
*/
const getHistoryAsync = (Env, channelName, lastKnownHash, beforeHash, handler, cb) => {
const store = Env.store;
let offset = -1;
nThen((waitFor) => {
getHistoryOffset(Env, channelName, lastKnownHash, waitFor((err, os) => {
if (err) {
waitFor.abort();
return void cb(err);
}
offset = os;
}));
}).nThen((waitFor) => {
if (offset === -1) {
return void cb(new Error('EUNKNOWN'));
}
const start = (beforeHash) ? 0 : offset;
store.readMessagesBin(channelName, start, (msgObj, readMore, abort) => {
if (beforeHash && msgObj.offset >= offset) { return void abort(); }
var parsed = tryParse(Env, msgObj.buff.toString('utf8'));
if (!parsed) { return void readMore(); }
handler(parsed, readMore);
}, waitFor(function (err) {
return void cb(err);
}));
});
};
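/* Illustrative usage of getHistoryAsync (not part of the module; names are hypothetical):
    getHistoryAsync(Env, channelName, -1, false, function (msg, readMore) {
        console.log(msg); // one parsed message from the channel's log
        readMore();       // pull the next message
    }, function (err) {
        if (err) { return void console.error(err); }
        console.log('end of history');
    });
*/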
const handleRPC = function (Env, Server, seq, userId, parsed) {
const HISTORY_KEEPER_ID = Env.id;
/* RPC Calls... */
var rpc_call = parsed.slice(1);
Server.send(userId, [seq, 'ACK']);
try {
// slice off the sequence number and pass in the rest of the message
Env.rpc(Server, userId, rpc_call, function (err, output) {
if (err) {
Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify([parsed[0], 'ERROR', err])]);
return;
}
Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify([parsed[0]].concat(output))]);
});
} catch (e) {
// if anything throws in the middle, send an error
Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify([parsed[0], 'ERROR', 'SERVER_ERROR'])]);
}
};
/*
This is called when a user tries to connect to a channel that doesn't exist.
we initialize that channel by writing the metadata supplied by the user to its log.
if the provided metadata has an expire time then we also create a task to expire it.
*/
const handleFirstMessage = function (Env, channelName, metadata) {
Env.store.writeMetadata(channelName, JSON.stringify(metadata), function (err) {
if (err) {
// FIXME tell the user that there was a channel error?
return void Env.Log.error('HK_WRITE_METADATA', {
channel: channelName,
error: err,
});
}
});
// write tasks
if(metadata.expire && typeof(metadata.expire) === 'number') {
// the fun part...
// the user has said they want this pad to expire at some point
Env.writeTask(metadata.expire, "EXPIRE", [ channelName ], function (err) {
if (err) {
// if there is an error, we don't want to crash the whole server...
// just log it, and if there's a problem you'll be able to fix it
// at a later date with the provided information
Env.Log.error('HK_CREATE_EXPIRE_TASK', err);
Env.Log.info('HK_INVALID_EXPIRE_TASK', JSON.stringify([metadata.expire, 'EXPIRE', channelName]));
}
});
}
};
const handleGetHistory = function (Env, Server, seq, userId, parsed) {
const metadata_cache = Env.metadata_cache;
const HISTORY_KEEPER_ID = Env.id;
const Log = Env.Log;
// parsed[1] is the channel id
// parsed[2] is a validation key or an object containing metadata (optional)
// parsed[3] is the last known hash (optional)
Server.send(userId, [seq, 'ACK']);
var channelName = parsed[1];
var config = parsed[2];
var metadata = {};
var lastKnownHash;
var txid;
// clients can optionally pass a map of attributes
// if the channel already exists this map will be ignored
// otherwise it will be stored as the initial metadata state for the channel
if (config && typeof config === "object" && !Array.isArray(parsed[2])) {
lastKnownHash = config.lastKnownHash;
metadata = config.metadata || {};
txid = config.txid;
if (metadata.expire) {
metadata.expire = +metadata.expire * 1000 + (+new Date());
}
}
metadata.channel = channelName;
metadata.created = +new Date();
// if the user sends us an invalid key, we won't be able to validate their messages
// so they'll never get written to the log anyway. Let's just drop their message
// on the floor instead of doing a bunch of extra work
// TODO send them an error message so they know something is wrong
if (metadata.validateKey && !isValidValidateKeyString(metadata.validateKey)) {
return void Log.error('HK_INVALID_KEY', metadata.validateKey);
}
nThen(function (waitFor) {
var w = waitFor();
/* fetch the channel's metadata.
use it to check if the channel has expired.
send it to the client if it exists.
*/
getMetadata(Env, channelName, waitFor(function (err, metadata) {
if (err) {
Env.Log.error('HK_GET_HISTORY_METADATA', {
channel: channelName,
error: err,
});
return void w();
}
if (!metadata || !metadata.channel) { return w(); }
// if there is already a metadata log then use it instead
// of whatever the user supplied
// it's possible that the channel doesn't have metadata
// but in that case there's no point in checking if the channel expired
// or in trying to send metadata, so just skip this block
if (!metadata) { return void w(); }
// And then check if the channel is expired. If it is, send the error and abort
// FIXME this is hard to read because 'checkExpired' has side effects
if (checkExpired(Env, Server, channelName)) { return void waitFor.abort(); }
// always send metadata with GET_HISTORY requests
Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify(metadata)], w);
}));
}).nThen(() => {
let msgCount = 0;
// TODO compute lastKnownHash in a manner such that it will always skip past the metadata line?
getHistoryAsync(Env, channelName, lastKnownHash, false, (msg, readMore) => {
msgCount++;
// avoid sending the metadata message a second time
if (isMetadataMessage(msg) && metadata_cache[channelName]) { return readMore(); }
if (txid) { msg[0] = txid; }
Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify(msg)], readMore);
}, (err) => {
if (err && err.code !== 'ENOENT') {
if (err.message !== 'EINVAL') { Log.error("HK_GET_HISTORY", {
err: err && err.message,
stack: err && err.stack,
}); }
const parsedMsg = {error:err.message, channel: channelName, txid: txid};
Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify(parsedMsg)]);
return;
}
if (msgCount === 0 && !metadata_cache[channelName] && Server.channelContainsUser(channelName, userId)) {
handleFirstMessage(Env, channelName, metadata);
Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify(metadata)]);
}
// End of history message:
let parsedMsg = {state: 1, channel: channelName, txid: txid};
Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify(parsedMsg)]);
});
});
};
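/* For reference, the shape of the parsed GET_HISTORY message which the handler above
   expects, reconstructed from its code (all values are hypothetical):
    [
        'GET_HISTORY',
        'e6a0e2bc84b0e83e8f0ed81d31c2e0a9', // channel id
        {                                   // optional config map
            lastKnownHash: '<first 64 characters of a stored message>',
            txid: 'abcdef',
            // only used if the channel does not exist yet;
            // 'expire' is a duration in seconds, converted above to an absolute timestamp
            metadata: { validateKey: '<base64 key>', owners: [], expire: 3600 }
        }
    ]
*/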
const handleGetHistoryRange = function (Env, Server, seq, userId, parsed) {
var channelName = parsed[1];
var map = parsed[2];
const HISTORY_KEEPER_ID = Env.id;
if (!(map && typeof(map) === 'object')) {
return void Server.send(userId, [seq, 'ERROR', 'INVALID_ARGS', HISTORY_KEEPER_ID]);
}
var oldestKnownHash = map.from;
var desiredMessages = map.count;
var desiredCheckpoint = map.cpCount;
var txid = map.txid;
if (typeof(desiredMessages) !== 'number' && typeof(desiredCheckpoint) !== 'number') {
return void Server.send(userId, [seq, 'ERROR', 'UNSPECIFIED_COUNT', HISTORY_KEEPER_ID]);
}
if (!txid) {
return void Server.send(userId, [seq, 'ERROR', 'NO_TXID', HISTORY_KEEPER_ID]);
}
Server.send(userId, [seq, 'ACK']);
Env.getOlderHistory(channelName, oldestKnownHash, desiredMessages, desiredCheckpoint, function (err, toSend) {
if (err && err.code !== 'ENOENT') {
Env.Log.error("HK_GET_OLDER_HISTORY", err);
}
if (Array.isArray(toSend)) {
toSend.forEach(function (msg) {
Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId,
JSON.stringify(['HISTORY_RANGE', txid, msg])]);
});
}
Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId,
JSON.stringify(['HISTORY_RANGE_END', txid, channelName])
]);
});
};
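/* For reference, the parsed GET_HISTORY_RANGE message which the handler above expects,
   reconstructed from its code (values are hypothetical):
    ['GET_HISTORY_RANGE', '<channel id>', {
        from: '<oldest known hash>', // optional starting point
        count: 100,                  // desired number of messages, and/or...
        cpCount: 2,                  // ...desired number of checkpoints
        txid: 'abcdef'               // required; used to tag HISTORY_RANGE responses
    }]
*/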
const handleGetFullHistory = function (Env, Server, seq, userId, parsed) {
const HISTORY_KEEPER_ID = Env.id;
const Log = Env.Log;
// parsed[1] is the channel id
// parsed[2] is a validation key (optional)
// parsed[3] is the last known hash (optional)
Server.send(userId, [seq, 'ACK']);
// FIXME should we send metadata here too?
// none of the clientside code which uses this API needs metadata, but it won't hurt to send it (2019-08-22)
return void getHistoryAsync(Env, parsed[1], -1, false, (msg, readMore) => {
Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify(['FULL_HISTORY', msg])], readMore);
}, (err) => {
let parsedMsg = ['FULL_HISTORY_END', parsed[1]];
if (err) {
Log.error('HK_GET_FULL_HISTORY', err.stack);
parsedMsg = ['ERROR', parsed[1], err.message];
}
Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify(parsedMsg)]);
});
};
const directMessageCommands = {
GET_HISTORY: handleGetHistory,
GET_HISTORY_RANGE: handleGetHistoryRange,
GET_FULL_HISTORY: handleGetFullHistory,
};
/* onDirectMessage
* exported for use by the netflux-server
* parses and handles all direct messages directed to the history keeper
* checks whether the requested channel has expired and executes the associated side effects
* routes queries to the appropriate handlers
*/
HK.onDirectMessage = function (Env, Server, seq, userId, json) {
const Log = Env.Log;
const HISTORY_KEEPER_ID = Env.id;
Log.silly('HK_MESSAGE', json);
let parsed;
try {
parsed = JSON.parse(json[2]);
} catch (err) {
Log.error("HK_PARSE_CLIENT_MESSAGE", json);
return;
}
var first = parsed[0];
if (typeof(directMessageCommands[first]) !== 'function') {
// it's either an unsupported command or an RPC call
// either way, RPC has it covered
return void handleRPC(Env, Server, seq, userId, parsed);
}
// otherwise it's some kind of history retrieval command...
// go grab its metadata, because unfortunately people can ask for history
// whether or not they have joined the channel, so we can't rely on JOIN restriction
// to stop people from loading history they shouldn't see.
var channelName = parsed[1];
nThen(function (w) {
getMetadata(Env, channelName, w(function (err, metadata) {
if (err) {
// stream errors?
// we should log these, but if we can't load metadata
// then it's probably not restricted or expired
// it's not like anything else will recover from this anyway
return;
}
// likewise, we can't do anything more here if there's no metadata
// jump to the next block
if (!metadata) { return; }
// If the requested history is for an expired channel, abort
// checkExpired has side effects and will disconnect users for you...
if (checkExpired(Env, Server, parsed[1])) {
// if the channel is expired just abort.
w.abort();
return;
}
// jump to handling the command if there's no restriction...
if (!metadata.restricted) { return; }
// check if the user is in the allow list...
const allowed = HK.listAllowedUsers(metadata);
const session = HK.getNetfluxSession(Env, userId);
if (HK.isUserSessionAllowed(allowed, session)) {
return;
}
/* Anyone in the userlist that isn't in the allow list should have already
been kicked out of the channel. Likewise, disallowed users should not
be able to add themselves to the userlist because JOIN commands respect
access control settings. The error that is sent below protects against
the remaining case, in which users try to get history without having
joined the channel. Normally we'd send the allow list to tell them the
key with which they should authenticate, but since we don't use this
behaviour, I'm doing the easy thing and just telling them to GO AWAY.
We can implement the more advanced behaviour later if it turns out that
we need it. This check guards against all kinds of history
access: GET_HISTORY, GET_HISTORY_RANGE, GET_FULL_HISTORY.
*/
w.abort();
return void Server.send(userId, [
seq,
'ERROR',
'ERESTRICTED',
HISTORY_KEEPER_ID
]);
}));
}).nThen(function () {
// run the appropriate command from the map
directMessageCommands[first](Env, Server, seq, userId, parsed);
});
};
/* onChannelMessage
Determine what we should store when a message is broadcast to a channel
* ignores ephemeral channels
* ignores messages sent to expired channels
* rejects duplicated checkpoints
* validates messages to channels that have validation keys
* caches the id of the last saved checkpoint
* adds timestamps to incoming messages
* writes messages to the store
*/
HK.onChannelMessage = function (Env, Server, channel, msgStruct) {
//console.log(+new Date(), "onChannelMessage");
const Log = Env.Log;
// TODO our usage of 'channel' here looks prone to errors
// we only use it for its 'id', but it can contain other stuff
// also, we're using this RPC from both the RPC and Netflux-server
// we should probably just change this to expect a channel id directly
// don't store messages if the channel id indicates that it's an ephemeral message
if (!channel.id || channel.id.length === EPHEMERAL_CHANNEL_LENGTH) { return; }
const isCp = /^cp\|/.test(msgStruct[4]);
let id;
if (isCp) {
// id becomes either null or an array of results...
id = CHECKPOINT_PATTERN.exec(msgStruct[4]);
if (Array.isArray(id) && id[2] && id[2] === channel.lastSavedCp) {
// Reject duplicate checkpoints
return;
}
}
let metadata;
nThen(function (w) {
getMetadata(Env, channel.id, w(function (err, _metadata) {
// if there's no channel metadata then it can't be an expiring channel
// nor can we possibly validate it
if (!_metadata) { return; }
metadata = _metadata;
// don't write messages to expired channels
if (checkExpired(Env, Server, channel)) { return void w.abort(); }
}));
}).nThen(function (w) {
// if there's no validateKey present skip to the next block
if (!(metadata && metadata.validateKey)) { return; }
// trim the checkpoint indicator off the message if it's present
let signedMsg = (isCp) ? msgStruct[4].replace(CHECKPOINT_PATTERN, '') : msgStruct[4];
// convert the message from a base64 string into a Uint8Array
//const txid = Util.uid();
// Listen for messages
//console.log(+new Date(), "Send verification request");
Env.validateMessage(signedMsg, metadata.validateKey, w(function (err) {
// no errors means success
if (!err) { return; }
// validation can fail in multiple ways
if (err === 'FAILED') {
// we log this case, but not others for some reason
Log.info("HK_SIGNED_MESSAGE_REJECTED", 'Channel '+channel.id);
}
// always abort if there was an error...
return void w.abort();
}));
}).nThen(function () {
// do checkpoint stuff...
// 1. get the checkpoint id
// 2. reject duplicate checkpoints
if (isCp) {
// if the message is a checkpoint we will have already validated
// that it isn't a duplicate. remember its id so that we can
// repeat this process for the next incoming checkpoint
// WARNING: the fact that we only check the most recent checkpoints
// is a potential source of bugs if one editor has high latency and
// pushes a duplicate of an earlier checkpoint than the latest which
// has been pushed by editors with low latency
// FIXME
if (Array.isArray(id) && id[2]) {
// Store new checkpoint hash
channel.lastSavedCp = id[2];
}
}
// add the time to the message
msgStruct.push(now());
// storeMessage
//console.log(+new Date(), "Storing message");
storeMessage(Env, channel, JSON.stringify(msgStruct), isCp, getHash(msgStruct[4], Log));
//console.log(+new Date(), "Message stored");
});
};

26
lib/load-config.js

@ -1,7 +1,7 @@
/* jslint node: true */
"use strict";
var config;
var configPath = process.env.CRYPTPAD_CONFIG || "../config/config";
var configPath = process.env.CRYPTPAD_CONFIG || "../config/config.js";
try {
config = require(configPath);
if (config.adminEmail === 'i.did.not.read.my.config@cryptpad.fr') {
@ -18,5 +18,29 @@ try {
}
config = require("../config/config.example");
}
var isPositiveNumber = function (n) {
return (!isNaN(n) && n >= 0);
};
if (!isPositiveNumber(config.inactiveTime)) {
config.inactiveTime = 90;
}
if (!isPositiveNumber(config.archiveRetentionTime)) {
config.archiveRetentionTime = 90;
}
if (!isPositiveNumber(config.maxUploadSize)) {
config.maxUploadSize = 20 * 1024 * 1024;
}
if (!isPositiveNumber(config.defaultStorageLimit)) {
config.defaultStorageLimit = 50 * 1024 * 1024;
}
// premiumUploadSize is worthless if it isn't a valid positive number
// or if it's less than the default upload size
if (!isPositiveNumber(config.premiumUploadSize) || config.premiumUploadSize < config.maxUploadSize) {
delete config.premiumUploadSize;
}
module.exports = config;

13
lib/log.js

@ -1,5 +1,5 @@
/*jshint esversion: 6 */
var Store = require("../storage/file");
var Store = require("./storage/file");
var Logger = module.exports;
@ -21,7 +21,7 @@ var write = function (ctx, content) {
};
// various degrees of logging
const logLevels = ['silly', 'verbose', 'debug', 'feedback', 'info', 'warn', 'error'];
const logLevels = Logger.levels = ['silly', 'verbose', 'debug', 'feedback', 'info', 'warn', 'error'];
var handlers = {
silly: function (ctx, time, tag, info) {
@ -96,12 +96,17 @@ Logger.create = function (config, cb) {
if (!config.logPath) {
console.log("No logPath configured. Logging to file disabled");
return void cb(Object.freeze(createMethods(ctx)));
var logger = createMethods(ctx);
logger.shutdown = noop;
return void cb(Object.freeze(logger));
}
Store.create({
filePath: config.logPath,
}, function (store) {
}, function (err, store) {
if (err) {
throw err;
}
ctx.store = store;
var logger = createMethods(ctx);
logger.shutdown = function () {

217
lib/metadata.js

@ -1,24 +1,170 @@
var Meta = module.exports;
var deduplicate = require("./deduplicate");
var deduplicate = require("./common-util").deduplicateString;
/* Metadata fields:
/* Metadata fields and the commands that can modify them
we assume that these commands can only be performed
by owners or in some cases pending owners. Thus
the owners field is guaranteed to exist.
* channel <STRING>
* validateKey <STRING>
* owners <ARRAY>
* ADD_OWNERS
* RM_OWNERS
* RESET_OWNERS
* pending_owners <ARRAY>
* ADD_PENDING_OWNERS
* RM_PENDING_OWNERS
* expire <NUMBER>
* UPDATE_EXPIRATION (NOT_IMPLEMENTED)
* restricted <BOOLEAN>
* RESTRICT_ACCESS
* allowed <ARRAY>
* ADD_ALLOWED
* RM_ALLOWED
* RESET_ALLOWED
* ADD_OWNERS
* RESET_OWNERS
* mailbox <STRING|MAP>
* ADD_MAILBOX
* RM_MAILBOX
*/
var commands = {};
var isValidOwner = function (owner) {
var isValidPublicKey = function (owner) {
return typeof(owner) === 'string' && owner.length === 44;
};
// isValidPublicKey is a better indication of what the above function does
// I'm preserving this function name in case we ever want to expand its
// criteria at a later time...
var isValidOwner = isValidPublicKey;
// ["RESTRICT_ACCESS", [true], 1561623438989]
// ["RESTRICT_ACCESS", [false], 1561623438989]
commands.RESTRICT_ACCESS = function (meta, args) {
if (!Array.isArray(args) || typeof(args[0]) !== 'boolean') {
throw new Error('INVALID_STATE');
}
var bool = args[0];
// reject the proposed command if there is no change in state
if (meta.restricted === bool) { return false; }
// apply the new state
meta.restricted = args[0];
// if you're disabling access restrictions then you can assume
// that there is nothing more to do. Leave the existing list as-is
if (!bool) { return true; }
// you're all set if an allow list already exists
if (Array.isArray(meta.allowed)) { return true; }
// otherwise define it
meta.allowed = [];
return true;
};
// ["ADD_ALLOWED", ["7eEqelGso3EBr5jHlei6av4r9w2B9XZiGGwA1EgZ-5I=", ...], 1561623438989]
commands.ADD_ALLOWED = function (meta, args) {
if (!Array.isArray(args)) {
throw new Error("INVALID_ARGS");
}
var allowed = meta.allowed || [];
var changed = false;
args.forEach(function (arg) {
// don't add invalid public keys
if (!isValidPublicKey(arg)) { return; }
// don't add owners to the allow list
if (meta.owners.indexOf(arg) >= 0) { return; }
// don't duplicate entries in the allow list
if (allowed.indexOf(arg) >= 0) { return; }
allowed.push(arg);
changed = true;
});
if (changed) {
meta.allowed = meta.allowed || allowed;
}
return changed;
};
// ["RM_ALLOWED", ["7eEqelGso3EBr5jHlei6av4r9w2B9XZiGGwA1EgZ-5I=", ...], 1561623438989]
commands.RM_ALLOWED = function (meta, args) {
if (!Array.isArray(args)) {
throw new Error("INVALID_ARGS");
}
// there may not be anything to remove
if (!meta.allowed) { return false; }
var changed = false;
args.forEach(function (arg) {
var index = meta.allowed.indexOf(arg);
if (index < 0) { return; }
meta.allowed.splice(index, 1);
changed = true;
});
return changed;
};
var arrayHasChanged = function (A, B) {
var changed;
A.some(function (a) {
if (B.indexOf(a) < 0) { return (changed = true); }
});
if (changed) { return true; }
B.some(function (b) {
if (A.indexOf(b) < 0) { return (changed = true); }
});
return changed;
};
var filterInPlace = function (A, f) {
for (var i = A.length - 1; i >= 0; i--) {
if (f(A[i], i, A)) { A.splice(i, 1); }
}
};
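/* Quick illustration of the two helpers above (not part of the module):
    var A = [1, 2, 3, 4];
    filterInPlace(A, function (x) { return x % 2 === 0; }); // removes elements for which f is true
    // A is now [1, 3]
    arrayHasChanged([1, 2], [2, 1]); // => undefined (same members, no functional change)
    arrayHasChanged([1, 2], [1, 3]); // => true
*/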
// ["RESET_ALLOWED", ["7eEqelGso3EBr5jHlei6av4r9w2B9XZiGGwA1EgZ-5I=", ...], 1561623438989]
commands.RESET_ALLOWED = function (meta, args) {
if (!Array.isArray(args)) { throw new Error("INVALID_ARGS"); }
var updated = args.filter(function (arg) {
// don't allow invalid public keys
if (!isValidPublicKey(arg)) { return false; }
// don't ever add owners to the allow list
if (meta.owners.indexOf(arg) >= 0) { return false; }
return true;
});
// this is strictly an optimization...
// a change in length is a clear indicator of a functional change
if (meta.allowed && meta.allowed.length !== updated.length) {
meta.allowed = updated;
return true;
}
// otherwise we must check that the arrays contain distinct elements
// if there is no functional change, then return false
if (!arrayHasChanged(meta.allowed, updated)) { return false; }
// otherwise overwrite the in-memory data and indicate that there was a change
meta.allowed = updated;
return true;
};
// ["ADD_OWNERS", ["7eEqelGso3EBr5jHlei6av4r9w2B9XZiGGwA1EgZ-5I="], 1561623438989]
commands.ADD_OWNERS = function (meta, args) {
// bail out if args isn't an array
@ -40,6 +186,13 @@ commands.ADD_OWNERS = function (meta, args) {
changed = true;
});
if (changed && Array.isArray(meta.allowed)) {
// make sure owners are not included in the allow list
filterInPlace(meta.allowed, function (member) {
return meta.owners.indexOf(member) !== -1;
});
}
return changed;
};
@ -71,6 +224,10 @@ commands.RM_OWNERS = function (meta, args) {
changed = true;
});
if (meta.owners.length === 0 && meta.restricted) {
meta.restricted = false;
}
return changed;
};
@ -141,6 +298,18 @@ commands.RESET_OWNERS = function (meta, args) {
// overwrite the existing owners with the new one
meta.owners = deduplicate(args.filter(isValidOwner));
if (Array.isArray(meta.allowed)) {
// make sure owners are not included in the allow list
filterInPlace(meta.allowed, function (member) {
return meta.owners.indexOf(member) !== -1;
});
}
if (meta.owners.length === 0 && meta.restricted) {
meta.restricted = false;
}
return true;
};
@ -178,6 +347,25 @@ commands.ADD_MAILBOX = function (meta, args) {
return changed;
};
commands.RM_MAILBOX = function (meta, args) {
if (!Array.isArray(args)) { throw new Error("INVALID_ARGS"); }
if (!meta.mailbox || typeof(meta.mailbox) === 'undefined') {
return false;
}
if (typeof(meta.mailbox) === 'string' && args.length === 0) {
delete meta.mailbox;
return true;
}
var changed = false;
args.forEach(function (arg) {
if (typeof(meta.mailbox[arg]) === 'undefined') { return; }
delete meta.mailbox[arg];
changed = true;
});
return changed;
};
commands.UPDATE_EXPIRATION = function () {
throw new Error("E_NOT_IMPLEMENTED");
};
@ -198,6 +386,7 @@ Meta.commands = Object.keys(commands);
Meta.createLineHandler = function (ref, errorHandler) {
ref.meta = {};
ref.index = 0;
ref.logged = {};
return function (err, line) {
if (err) {
@ -211,13 +400,20 @@ Meta.createLineHandler = function (ref, errorHandler) {
line: JSON.stringify(line),
});
}
// the case above is special, everything else should increment the index
var index = ref.index++;
if (typeof(line) === 'undefined') { return; }
if (Array.isArray(line)) {
try {
handleCommand(ref.meta, line);
ref.index++;
} catch (err2) {
var code = err2.message;
if (ref.logged[code]) { return; }
ref.logged[code] = true;
errorHandler("METADATA_COMMAND_ERR", {
error: err2.stack,
line: line,
@ -226,8 +422,15 @@ Meta.createLineHandler = function (ref, errorHandler) {
return;
}
if (ref.index === 0 && typeof(line) === 'object') {
ref.index++;
// the first line of a channel is processed before the dedicated metadata log.
// it can contain a map, in which case it should be used as the initial state.
// it's possible that a trim-history command was interrupted, in which case
// this first message might exist in parallel with the more recent metadata log
// which will contain the computed state of the previous metadata log
// which has since been archived.
// Thus, accept both the first and second lines you process as valid initial state
// preferring the second if it exists
if (index < 2 && line && typeof(line) === 'object') {
// special case!
ref.meta = line;
return;
@ -235,7 +438,7 @@ Meta.createLineHandler = function (ref, errorHandler) {
errorHandler("METADATA_HANDLER_WEIRDLINE", {
line: line,
index: ref.index++,
index: index,
});
};
};

7
lib/once.js

@ -1,7 +0,0 @@
module.exports = function (f, g) {
return function () {
if (!f) { return; }
f.apply(this, Array.prototype.slice.call(arguments));
f = g;
};
};

185
lib/pins.js

@ -2,6 +2,14 @@
var Pins = module.exports;
const Fs = require("fs");
const Path = require("path");
const Util = require("./common-util");
const Plan = require("./plan");
const Semaphore = require('saferphore');
const nThen = require('nthen');
/* Accepts a reference to an object, and...
either a string describing which log is being processed (backwards compatibility),
or a function which will log the error with all relevant data
@ -22,7 +30,11 @@ var createLineHandler = Pins.createLineHandler = function (ref, errorHandler) {
// make sure to get ref.pins as the result
// it's a weird API but it's faster than unpinning manually
var pins = ref.pins = {};
ref.index = 0;
ref.latest = 0; // the latest message (timestamp in ms)
ref.surplus = 0; // how many lines exist behind a reset
return function (line) {
ref.index++;
if (!Boolean(line)) { return; }
var l;
@ -36,10 +48,15 @@ var createLineHandler = Pins.createLineHandler = function (ref, errorHandler) {
return void errorHandler('PIN_LINE_NOT_FORMAT_ERROR', l);
}
if (typeof(l[2]) === 'number') {
ref.latest = l[2]; // date
}
switch (l[0]) {
case 'RESET': {
pins = ref.pins = {};
if (l[1] && l[1].length) { l[1].forEach((x) => { ref.pins[x] = 1; }); }
ref.surplus = ref.index;
//jshint -W086
// fallthrough
}
@ -72,5 +89,171 @@ Pins.calculateFromLog = function (pinFile, fileName) {
return Object.keys(ref.pins);
};
// TODO refactor to include a streaming version for use in rpc.js as well
/*
pins/
pins/A+/
pins/A+/A+hyhrQLrgYixOomZYxpuEhwfiVzKk1bBp+arH-zbgo=.ndjson
*/
const getSafeKeyFromPath = function (path) {
return path.replace(/^.*\//, '').replace(/\.ndjson/, '');
};
const addUserPinToState = Pins.addUserPinToState = function (state, safeKey, itemId) {
(state[itemId] = state[itemId] || {})[safeKey] = 1;
};
Pins.list = function (_done, config) {
// allow for a configurable pin store location
const pinPath = config.pinPath || './data/pins';
// allow for a configurable amount of parallelism
const plan = Plan(config.workers || 5);
// run a supplied handler whenever you finish reading a log
// or noop if not supplied.
const handler = config.handler || function () {};
// use and mutate a supplied object for state if it's passed
const pinned = config.pinned || {};
var isDone = false;
// ensure that 'done' is only called once
// that it calls back asynchronously
// and that it sets 'isDone' to true, so that pending processes
// know to abort
const done = Util.once(Util.both(Util.mkAsync(_done), function () {
isDone = true;
}));
const errorHandler = function (label, info) {
console.log(label, info);
};
// TODO replace this with lib-readline?
const streamFile = function (path, cb) {
const id = getSafeKeyFromPath(path);
return void Fs.readFile(path, 'utf8', function (err, body) {
if (err) { return void cb(err); }
const ref = {};
const pinHandler = createLineHandler(ref, errorHandler);
var lines = body.split('\n');
lines.forEach(pinHandler);
handler(ref, id, pinned);
cb(void 0, ref);
});
};
const scanDirectory = function (path, cb) {
Fs.readdir(path, function (err, list) {
if (err) {
return void cb(err);
}
cb(void 0, list.map(function (item) {
return {
path: Path.join(path, item),
id: item.replace(/\.ndjson$/, ''),
};
}));
});
};
scanDirectory(pinPath, function (err, dirs) {
if (err) {
if (err.code === 'ENOENT') { return void done(void 0, {}); }
return void done(err);
}
dirs.forEach(function (dir) {
plan.job(1, function (next) {
if (isDone) { return void next(); }
scanDirectory(dir.path, function (nested_err, logs) {
if (nested_err) {
return void done(nested_err);
}
logs.forEach(function (log) {
if (!/\.ndjson$/.test(log.path)) { return; }
plan.job(0, function (next) {
if (isDone) { return void next(); }
streamFile(log.path, function (err, ref) {
if (err) { return void done(err); }
var set = ref.pins;
for (var item in set) {
addUserPinToState(pinned, log.id, item);
}
next();
});
});
});
next();
});
});
});
plan.done(function () {
// err ?
done(void 0, pinned);
}).start();
});
};
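/* Illustrative usage of Pins.list (not part of the module). The options shown are the
   ones read by the implementation above; the pin store path is hypothetical.
    Pins.list(function (err, pinned) {
        if (err) { throw err; }
        // pinned maps an item id to the set of users (safeKeys) who pin it:
        // { '<itemId>': { '<safeKey>': 1, ... }, ... }
        console.log(Object.keys(pinned).length + ' pinned items');
    }, {
        pinPath: './data/pins', // defaults to './data/pins'
        workers: 5,             // degree of parallelism
    });
*/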
Pins.load = function (cb, config) {
const sema = Semaphore.create(config.workers || 5);
let dirList;
const fileList = [];
const pinned = {};
var pinPath = config.pinPath || './pins';
var done = Util.once(cb);
nThen((waitFor) => {
// recurse over the configured pinPath, or the default
Fs.readdir(pinPath, waitFor((err, list) => {
if (err) {
if (err.code === 'ENOENT') {
dirList = [];
return; // this ends up calling back with an empty object
}
waitFor.abort();
return void done(err);
}
dirList = list;
}));
}).nThen((waitFor) => {
dirList.forEach((f) => {
sema.take((returnAfter) => {
// iterate over all the subdirectories in the pin store
Fs.readdir(Path.join(pinPath, f), waitFor(returnAfter((err, list2) => {
if (err) {
waitFor.abort();
return void done(err);
}
list2.forEach((ff) => {
if (config && config.exclude && config.exclude.indexOf(ff) > -1) { return; }
fileList.push(Path.join(pinPath, f, ff));
});
})));
});
});
}).nThen((waitFor) => {
fileList.forEach((f) => {
sema.take((returnAfter) => {
Fs.readFile(f, waitFor(returnAfter((err, content) => {
if (err) {
waitFor.abort();
return void done(err);
}
const hashes = Pins.calculateFromLog(content.toString('utf8'), f);
hashes.forEach((x) => {
(pinned[x] = pinned[x] || {})[f.replace(/.*\/([^/]*).ndjson$/, (x, y)=>y)] = 1;
});
})));
});
});
}).nThen(() => {
done(void 0, pinned);
});
};

235
lib/plan.js

@ -0,0 +1,235 @@
/*
There are many situations where we want to do lots of little jobs
in parallel and with few constraints as to their ordering.
One example is recursing over a bunch of directories and reading files.
The naive way to do this is to recurse over all the subdirectories
relative to a root while adding files to a list. Then to iterate over
the files in that list. Unfortunately, this means holding the complete
list of file paths in memory, which can't possibly scale as our database grows.
A better way to do this is to recurse into one directory and
iterate over its contents until there are no more, then to backtrack
to the next directory and repeat until no more directories exist.
This kind of thing is easy enough when you perform one task at a time
and use synchronous code, but with multiple asynchronous tasks it's
easy to introduce subtle bugs.
This module is designed for these situations. It allows you to easily
and efficiently schedule a large number of tasks with an associated
degree of priority from 0 (highest priority) to Number.MAX_SAFE_INTEGER.
Initialize your scheduler with a degree of parallelism, and start planning
some initial jobs. Set it to run and it will keep going until all jobs are
complete, at which point it will optionally execute a 'done' callback.
Getting back to the original example:
List the contents of the root directory, then plan subsequent jobs
with a priority of 1 to recurse into subdirectories. The callback
of each of these recursions can then plan higher priority tasks
to actually process the contained files with a priority of 0.
As long as there are more files scheduled it will continue to process
them first. When there are no more files the scheduler will read
the next directory and repopulate the list of files to process.
This will repeat until everything is done.
// load the module
const Plan = require("./plan");
// instantiate a scheduler with a parallelism of 5
var plan = Plan(5)
// plan the first job which schedules more jobs...
.job(1, function (next) {
listRootDirectory(function (files) {
files.forEach(function (file) {
// highest priority, run as soon as there is a free worker
plan.job(0, function (next) {
processFile(file, function (result) {
console.log(result);
// don't forget to call next
next();
});
});
});
next(); // call 'next' to free up one worker
});
})
// chain commands together if you want
.done(function () {
console.log("DONE");
})
// it won't run unless you launch it
.start();
*/
module.exports = function (max) {
var plan = {};
max = max || 5;
// finds an id that isn't in use in a particular map
// accepts an id in case you have one already chosen
// otherwise generates random new ids if one is not passed
// or if there is a collision
var uid = function (map, id) {
if (typeof(id) === 'undefined') {
id = Math.floor(Math.random() * Number.MAX_SAFE_INTEGER);
}
if (id && typeof(map[id]) === 'undefined') {
return id;
}
return uid(map);
};
// the queue of jobs is an array, which will be populated
// with maps for each level of priority
var jobs = [];
// the count of currently running jobs
var count = 0;
// a list of callbacks to be executed once everything is done
var completeHandlers = [];
// the recommended usage is to create a new scheduler for every job
// use it for internals in a scope, and let the garbage collector
// clean up when everything stops. This means you shouldn't
// go passing 'plan' around in a long-lived process!
var FINISHED = false;
var done = function () {
// 'done' gets called when there are no more jobs in the queue
// but other jobs might still be running...
// the count of running processes should never be less than zero
// because we guard against multiple callbacks
if (count < 0) { throw new Error("should never happen"); }
// greater than zero is definitely possible, it just means you aren't done yet
if (count !== 0) { return; }
// you will finish twice if you call 'start' a second time
// this behaviour isn't supported yet.
if (FINISHED) { throw new Error('finished twice'); }
FINISHED = true;
// execute all your 'done' callbacks
completeHandlers.forEach(function (f) { f(); });
};
var run;
// this 'next' is internal only.
// it iterates over all known jobs, running them until
// the scheduler achieves the desired amount of parallelism.
// If there are no more jobs it will call 'done'
// which will shortcircuit if there are still pending tasks.
// Whenever any tasks finishes it will return its lock and
// run as many new jobs as are allowed.
var next = function () {
// array.some skips over bare indexes in sparse arrays
var pending = jobs.some(function (bag /*, priority*/) {
if (!bag || typeof(bag) !== 'object') { return; }
// a bag is a map of jobs for any particular degree of priority
// iterate over jobs in the bag until you're out of 'workers'
for (var id in bag) {
// bail out if you hit max parallelism
if (count >= max) { return true; }
run(bag, id, next);
}
});
// check whether you're done if you hit the end of the array
if (!pending) { done(); }
};
// and here's the part that actually handles jobs...
run = function (bag, id) {
// this is just a sanity check.
// there should only ever be jobs in each bag.
if (typeof(bag[id]) !== 'function') {
throw new Error("expected function");
}
// keep a local reference to the function
var f = bag[id];
// remove it from the bag.
delete bag[id];
// increment the count of running jobs
count++;
// guard against it being called twice.
var called = false;
f(function () {
// watch out! it'll bite you.
// maybe this should just return?
// support that option for 'production' ?
if (called) { throw new Error("called twice"); }
// the code below is safe because we can't call back a second time
called = true;
// decrement the count of running jobs...
count--;
// and finally call next to replace this worker with more job(s)
next();
});
};
// this is exposed as API
plan.job = function (priority, cb) {
// you have to pass both the priority (a non-negative number) and an actual job
if (typeof(priority) !== 'number' || priority < 0) { throw new Error('expected a non-negative number'); }
// a job is an asynchronous function that takes a single parameter:
// a 'next' callback which will keep the whole thing going.
// forgetting to call 'next' means you'll never complete.
if (typeof(cb) !== 'function') { throw new Error('expected function'); }
// initialize the specified priority level if it doesn't already exist
var bag = jobs[priority] = jobs[priority] || {};
// choose a random id that isn't already in use for this priority level
var id = uid(bag);
// add the job to this priority level's bag
// most (all?) javascript engines will append this job to the bottom
// of the map. Meaning when we iterate it will be run later than
// other jobs that were scheduled first, effectively making a FIFO queue.
// However, this is undefined behaviour and you shouldn't ever rely on it.
bag[id] = function (next) {
cb(next);
};
// returning 'plan' lets us chain methods together.
return plan;
};
var started = false;
plan.start = function () {
// don't allow multiple starts
// even though it should work, it's simpler not to.
if (started) { return plan; }
// this seems to imply a 'stop' method
// but I don't need it, so I'm not implementing it now --ansuz
started = true;
// start asynchronously, otherwise jobs will start running
// before you've had a chance to return 'plan', and weird things
// happen.
setTimeout(function () {
next();
});
return plan;
};
// you can pass any number of functions to be executed
// when all pending jobs are complete.
// We don't pass any arguments, so you need to handle return values
// yourself if you want them.
plan.done = function (f) {
if (typeof(f) !== 'function') { throw new Error('expected function'); }
completeHandlers.push(f);
return plan;
};
// That's all! I hope you had fun reading this!
return plan;
};

216
lib/rpc.js

@ -0,0 +1,216 @@
/*jshint esversion: 6 */
const Util = require("./common-util");
const Core = require("./commands/core");
const Admin = require("./commands/admin-rpc");
const Pinning = require("./commands/pin-rpc");
const Quota = require("./commands/quota");
const Block = require("./commands/block");
const Metadata = require("./commands/metadata");
const Channel = require("./commands/channel");
const Upload = require("./commands/upload");
const HK = require("./hk-util");
var RPC = module.exports;
const UNAUTHENTICATED_CALLS = {
GET_FILE_SIZE: Pinning.getFileSize,
GET_MULTIPLE_FILE_SIZE: Pinning.getMultipleFileSize,
GET_DELETED_PADS: Pinning.getDeletedPads,
IS_CHANNEL_PINNED: Pinning.isChannelPinned, // FIXME drop this RPC
IS_NEW_CHANNEL: Channel.isNewChannel,
WRITE_PRIVATE_MESSAGE: Channel.writePrivateMessage,
GET_METADATA: Metadata.getMetadata,
};
var isUnauthenticateMessage = function (msg) {
return msg && msg.length === 2 && typeof(UNAUTHENTICATED_CALLS[msg[0]]) === 'function';
};
var handleUnauthenticatedMessage = function (Env, msg, respond, Server, netfluxId) {
Env.Log.silly('LOG_RPC', msg[0]);
var method = UNAUTHENTICATED_CALLS[msg[0]];
method(Env, msg[1], function (err, value) {
if (err) {
Env.WARN(err, msg[1]);
return void respond(err);
}
respond(err, [null, value, null]);
}, Server, netfluxId);
};
const AUTHENTICATED_USER_TARGETED = {
RESET: Pinning.resetUserPins,
PIN: Pinning.pinChannel,
UNPIN: Pinning.unpinChannel,
CLEAR_OWNED_CHANNEL: Channel.clearOwnedChannel,
REMOVE_OWNED_CHANNEL: Channel.removeOwnedChannel,
TRIM_HISTORY: Channel.trimHistory,
UPLOAD_STATUS: Upload.status,
UPLOAD: Upload.upload,
UPLOAD_COMPLETE: Upload.complete,
UPLOAD_CANCEL: Upload.cancel,
OWNED_UPLOAD_COMPLETE: Upload.complete_owned,
WRITE_LOGIN_BLOCK: Block.writeLoginBlock,
REMOVE_LOGIN_BLOCK: Block.removeLoginBlock,
ADMIN: Admin.command,
SET_METADATA: Metadata.setMetadata,
};
const AUTHENTICATED_USER_SCOPED = {
GET_HASH: Pinning.getHash,
GET_TOTAL_SIZE: Pinning.getTotalSize,
UPDATE_LIMITS: Quota.getUpdatedLimit,
GET_LIMIT: Pinning.getLimit,
EXPIRE_SESSION: Core.expireSessionAsync,
REMOVE_PINS: Pinning.removePins,
TRIM_PINS: Pinning.trimPins,
COOKIE: Core.haveACookie,
};
var isAuthenticatedCall = function (call) {
if (call === 'UPLOAD') { return false; }
return typeof(AUTHENTICATED_USER_TARGETED[call] || AUTHENTICATED_USER_SCOPED[call]) === 'function';
};
var handleAuthenticatedMessage = function (Env, unsafeKey, msg, respond, Server) {
/* If you have gotten this far, you have signed the message with the
public key which you provided.
*/
var safeKey = Util.escapeKeyCharacters(unsafeKey);
var Respond = function (e, value) {
var session = Env.Sessions[safeKey];
var token = session? session.tokens.slice(-1)[0]: '';
var cookie = Core.makeCookie(token).join('|');
respond(e ? String(e): e, [cookie].concat(typeof(value) !== 'undefined' ?value: []));
};
msg.shift();
// discard validated cookie from message
if (!msg.length) {
return void Respond('INVALID_MSG');
}
var TYPE = msg[0];
Env.Log.silly('LOG_RPC', TYPE);
if (typeof(AUTHENTICATED_USER_TARGETED[TYPE]) === 'function') {
return void AUTHENTICATED_USER_TARGETED[TYPE](Env, safeKey, msg[1], function (e, value) {
Env.WARN(e, value);
return void Respond(e, value);
}, Server);
}
if (typeof(AUTHENTICATED_USER_SCOPED[TYPE]) === 'function') {
return void AUTHENTICATED_USER_SCOPED[TYPE](Env, safeKey, function (e, value) {
if (e) {
Env.WARN(e, safeKey);
return void Respond(e);
}
Respond(e, value);
});
}
return void Respond('UNSUPPORTED_RPC_CALL', msg);
};
var rpc = function (Env, Server, userId, data, respond) {
if (!Array.isArray(data)) {
Env.Log.debug('INVALID_ARG_FORMAT', data);
return void respond('INVALID_ARG_FORMAT');
}
if (!data.length) {
return void respond("INSUFFICIENT_ARGS");
} else if (data.length !== 1) {
Env.Log.debug('UNEXPECTED_ARGUMENTS_LENGTH', data);
}
var msg = data[0].slice(0);
if (!Array.isArray(msg)) {
return void respond('INVALID_ARG_FORMAT');
}
if (isUnauthenticateMessage(msg)) {
return handleUnauthenticatedMessage(Env, msg, respond, Server, userId);
}
var signature = msg.shift();
var publicKey = msg.shift();
// make sure a user object is initialized in the cookie jar
var session;
if (publicKey) {
session = Core.getSession(Env.Sessions, publicKey);
} else {
Env.Log.debug("NO_PUBLIC_KEY_PROVIDED", publicKey);
}
var cookie = msg[0];
if (!Core.isValidCookie(Env.Sessions, publicKey, cookie)) {
// no cookie is fine if the RPC is to get a cookie
if (msg[1] !== 'COOKIE') {
return void respond('NO_COOKIE');
}
}
var serialized = JSON.stringify(msg);
if (!(serialized && typeof(publicKey) === 'string')) {
return void respond('INVALID_MESSAGE_OR_PUBLIC_KEY');
}
var command = msg[1];
if (command === 'UPLOAD') {
// UPLOAD is a special case that skips signature validation
// intentional fallthrough behaviour
return void handleAuthenticatedMessage(Env, publicKey, msg, respond, Server);
}
if (isAuthenticatedCall(command)) {
// check the signature on the message
// refuse the command if it doesn't validate
return void Env.checkSignature(serialized, signature, publicKey, function (err) {
if (err) {
return void respond("INVALID_SIGNATURE_OR_PUBLIC_KEY");
}
HK.authenticateNetfluxSession(Env, userId, publicKey);
return void handleAuthenticatedMessage(Env, publicKey, msg, respond, Server);
});
}
Env.Log.warn('INVALID_RPC_CALL', command);
return void respond("INVALID_RPC_CALL");
};
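/* For reference, the message shapes this dispatcher expects, reconstructed from the
   code above (all values are hypothetical). 'data' is the outer array; rpc() only
   inspects its first element.
    // unauthenticated calls: [command, args]
    [['GET_FILE_SIZE', '<channel id>']]
    // authenticated calls: [signature, publicKey, cookie, command, args]
    [['<detached signature>', '<public key>', '<cookie>', 'PIN', ['<channel id>']]]
*/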
RPC.create = function (Env, cb) {
var Sessions = Env.Sessions;
var updateLimitDaily = function () {
Quota.updateCachedLimits(Env, function (e) {
if (e) {
Env.WARN('limitUpdate', e);
}
});
};
Quota.applyCustomLimits(Env);
updateLimitDaily();
Env.intervals.dailyLimitUpdate = setInterval(updateLimitDaily, 24*3600*1000);
// expire old sessions once per minute
Env.intervals.sessionExpirationInterval = setInterval(function () {
Core.expireSessions(Sessions);
}, Core.SESSION_EXPIRATION_TIME);
cb(void 0, function (Server, userId, data, respond) {
try {
return rpc(Env, Server, userId, data, respond);
} catch (e) {
console.log("Error from RPC with data " + JSON.stringify(data));
console.log(e.stack);
}
});
};

172
lib/schedule.js

@ -0,0 +1,172 @@
var WriteQueue = require("./write-queue");
var Util = require("./common-util");
/* This module implements a FIFO scheduler
which assumes the existence of three types of async tasks:
1. ordered tasks which must be executed sequentially
2. unordered tasks which can be executed in parallel
3. blocking tasks which must block the execution of all other tasks
The scheduler assumes there will be many resources identified by strings,
and that the constraints described above will only apply in the context
of identical string ids.
Many blocking tasks may be executed in parallel so long as they
concern resources identified by different ids.
USAGE:
const schedule = require("./schedule")();
// schedule two sequential tasks using the resource 'pewpew'
schedule.ordered('pewpew', function (next) {
appendToFile('beep\n', next);
});
schedule.ordered('pewpew', function (next) {
appendToFile('boop\n', next);
});
// schedule a task that can happen whenever
schedule.unordered('pewpew', function (next) {
displayFileSize(next);
});
// schedule a blocking task which will wait
// until all unordered tasks have completed before commencing
schedule.blocking('pewpew', function (next) {
deleteFile(next);
});
// this will be queued for after the blocking task
schedule.ordered('pewpew', function (next) {
appendFile('boom', next);
});
*/
// return a uid which is not already in a map
var unusedUid = function (set) {
var uid = Util.uid();
if (set[uid]) { return unusedUid(set); }
return uid;
};
// return an existing session, creating one if it does not already exist
var lookup = function (map, id) {
return (map[id] = map[id] || {
//blocking: [],
active: {},
blocked: {},
});
};
var isEmpty = function (map) {
for (var key in map) {
if (map.hasOwnProperty(key)) { return false; }
}
return true;
};
module.exports = function () {
// every scheduler instance has its own queue
var queue = WriteQueue();
// ordered tasks don't require any extra logic
var Ordered = function (id, task) {
queue(id, task);
};
// unordered and blocking tasks need a little extra state
var map = {};
// regular garbage collection keeps memory consumption low
var collectGarbage = function (id) {
// avoid using 'lookup' since it creates a session implicitly
var local = map[id];
// bail out if no session
if (!local) { return; }
// bail out if there are blocking or active tasks
if (local.lock) { return; }
if (!isEmpty(local.active)) { return; }
// if there are no pending actions then delete the session
delete map[id];
};
// unordered tasks run immediately if there are no blocking tasks scheduled
// or immediately after blocking tasks finish
var runImmediately = function (local, task) {
// set a flag in the map of active unordered tasks
// to prevent blocking tasks from running until you finish
var uid = unusedUid(local.active);
local.active[uid] = true;
task(function () {
// remove the flag you set to indicate that your task completed
delete local.active[uid];
// don't do anything if other unordered tasks are still running
if (!isEmpty(local.active)) { return; }
// bail out if there are no blocking tasks scheduled or ready
if (typeof(local.waiting) !== 'function') {
return void collectGarbage();
}
setTimeout(local.waiting);
});
};
var runOnceUnblocked = function (local, task) {
var uid = unusedUid(local.blocked);
local.blocked[uid] = function () {
runImmediately(local, task);
};
};
// 'unordered' tasks are scheduled to run after the most recently received blocking task
// or immediately and in parallel if there are no blocking tasks scheduled.
var Unordered = function (id, task) {
var local = lookup(map, id);
if (local.lock) { return runOnceUnblocked(local, task); }
runImmediately(local, task);
};
var runBlocked = function (local) {
for (var task in local.blocked) {
runImmediately(local, local.blocked[task]);
}
};
// 'blocking' tasks must be run alone.
// They are queued alongside ordered tasks,
// and wait until any running 'unordered' tasks complete before commencing.
var Blocking = function (id, task) {
var local = lookup(map, id);
queue(id, function (next) {
// start right away if there are no running unordered tasks
if (isEmpty(local.active)) {
local.lock = true;
return void task(function () {
delete local.lock;
runBlocked(local);
next();
});
}
// otherwise wait until the running tasks have completed
local.waiting = function () {
local.lock = true;
task(function () {
delete local.lock;
delete local.waiting;
runBlocked(local);
next();
});
};
});
};
return {
ordered: Ordered,
unordered: Unordered,
blocking: Blocking,
};
};

628
lib/storage/blob.js

@ -0,0 +1,628 @@
/* globals Buffer */
var Fs = require("fs");
var Fse = require("fs-extra");
var Path = require("path");
var BlobStore = module.exports;
var nThen = require("nthen");
var Semaphore = require("saferphore");
var Util = require("../common-util");
var isValidSafeKey = function (safeKey) {
return typeof(safeKey) === 'string' && !/\//.test(safeKey) && safeKey.length === 44;
};
var isValidId = function (id) {
return typeof(id) === 'string' && id.length === 48 && !/[^a-f0-9]/.test(id);
};
// helpers
var prependArchive = function (Env, path) {
return Path.join(Env.archivePath, path);
};
// /blob/<safeKeyPrefix>/<safeKey>/<blobPrefix>/<blobId>
var makeBlobPath = function (Env, blobId) {
return Path.join(Env.blobPath, blobId.slice(0, 2), blobId);
};
// /blobstate/<safeKeyPrefix>/<safeKey>
var makeStagePath = function (Env, safeKey) {
return Path.join(Env.blobStagingPath, safeKey.slice(0, 2), safeKey);
};
// /blob/<safeKeyPrefix>/<safeKey>/<blobPrefix>/<blobId>
var makeProofPath = function (Env, safeKey, blobId) {
return Path.join(Env.blobPath, safeKey.slice(0, 3), safeKey, blobId.slice(0, 2), blobId);
};
var parseProofPath = function (path) {
var parts = path.split('/');
return {
blobId: parts[parts.length -1],
safeKey: parts[parts.length - 3],
};
};
// getUploadSize: used by
// getFileSize
var getUploadSize = function (Env, blobId, cb) {
var path = makeBlobPath(Env, blobId);
if (!path) { return cb('INVALID_UPLOAD_ID'); }
Fs.stat(path, function (err, stats) {
if (err) {
// if a file was deleted, its size is 0 bytes
if (err.code === 'ENOENT') { return cb(void 0, 0); }
return void cb(err.code);
}
cb(void 0, stats.size);
});
};
// isFile: used by
// removeOwnedBlob
// uploadComplete
// uploadStatus
var isFile = function (filePath, cb) {
Fs.stat(filePath, function (e, stats) {
if (e) {
if (e.code === 'ENOENT') { return void cb(void 0, false); }
return void cb(e.message);
}
return void cb(void 0, stats.isFile());
});
};
var makeFileStream = function (full, _cb) {
var cb = Util.once(Util.mkAsync(_cb));
Fse.mkdirp(Path.dirname(full), function (e) {
if (e || !full) { // !full for pleasing flow, it's already checked
return void cb(e ? e.message : 'INTERNAL_ERROR');
}
try {
var stream = Fs.createWriteStream(full, {
flags: 'a',
encoding: 'binary',
highWaterMark: Math.pow(2, 16),
});
stream.on('open', function () {
cb(void 0, stream);
});
stream.on('error', function (err) {
cb(err);
});
} catch (err) {
cb('BAD_STREAM');
}
});
};
/********** METHODS **************/
var upload = function (Env, safeKey, content, cb) {
var dec;
try { dec = Buffer.from(content, 'base64'); }
catch (e) { return void cb('DECODE_BUFFER'); }
var len = dec.length;
var session = Env.getSession(safeKey);
if (typeof(session.currentUploadSize) !== 'number' ||
typeof(session.pendingUploadSize) !== 'number') {
// improperly initialized... maybe they didn't check before uploading?
// reject it, just in case
return cb('NOT_READY');
}
if (session.currentUploadSize > session.pendingUploadSize) {
return cb('E_OVER_LIMIT');
}
var stagePath = makeStagePath(Env, safeKey);
if (!session.blobstage) {
makeFileStream(stagePath, function (e, stream) {
if (!stream) { return void cb(e); }
var blobstage = session.blobstage = stream;
blobstage.write(dec);
session.currentUploadSize += len;
cb(void 0, dec.length);
});
} else {
session.blobstage.write(dec);
session.currentUploadSize += len;
cb(void 0, dec.length);
}
};
// upload_cancel
var upload_cancel = function (Env, safeKey, fileSize, cb) {
var session = Env.getSession(safeKey);
session.pendingUploadSize = fileSize;
session.currentUploadSize = 0;
if (session.blobstage) {
session.blobstage.close();
delete session.blobstage;
}
var path = makeStagePath(Env, safeKey);
Fs.unlink(path, function (e) {
if (e) { return void cb('E_UNLINK'); }
cb(void 0);
});
};
// upload_complete
var upload_complete = function (Env, safeKey, id, cb) {
var session = Env.getSession(safeKey);
if (session.blobstage && session.blobstage.close) {
session.blobstage.close();
delete session.blobstage;
}
var oldPath = makeStagePath(Env, safeKey);
var newPath = makeBlobPath(Env, id);
nThen(function (w) {
// make sure the path to your final location exists
Fse.mkdirp(Path.dirname(newPath), w(function (e) {
if (e) {
w.abort();
return void cb('RENAME_ERR');
}
}));
}).nThen(function (w) {
// make sure there's not already something in that exact location
isFile(newPath, w(function (e, yes) {
if (e) {
w.abort();
return void cb(e);
}
if (yes) {
w.abort();
return void cb('RENAME_ERR');
}
}));
}).nThen(function () {
// finally, move the old file to the new path
// FIXME we could just move and handle the EEXISTS instead of the above block
Fse.move(oldPath, newPath, function (e) {
if (e) { return void cb('RENAME_ERR'); }
cb(void 0, id);
});
});
};
var tryId = function (path, cb) {
Fs.access(path, Fs.constants.R_OK | Fs.constants.W_OK, function (e) {
if (!e) {
// generate a new id (with the same prefix) and recurse
//WARN('ownedUploadComplete', 'id is already used '+ id);
return void cb('EEXISTS');
} else if (e.code === 'ENOENT') {
// no entry, so it's safe for us to proceed
return void cb();
} else {
// it failed in an unexpected way. log it
//WARN('ownedUploadComplete', e);
return void cb(e.code);
}
});
};
// owned_upload_complete
var owned_upload_complete = function (Env, safeKey, id, cb) {
var session = Env.getSession(safeKey);
// the file has already been uploaded to the staging area
// close the pending writestream
if (session.blobstage && session.blobstage.close) {
session.blobstage.close();
delete session.blobstage;
}
if (!isValidId(id)) {
//WARN('ownedUploadComplete', "id is invalid");
return void cb('EINVAL_ID');
}
var oldPath = makeStagePath(Env, safeKey);
if (typeof(oldPath) !== 'string') {
return void cb('EINVAL_CONFIG');
}
var finalPath = makeBlobPath(Env, id);
var finalOwnPath = makeProofPath(Env, safeKey, id);
// the user wants to move it into blob and create an empty file with the same id
// in their own space:
// /blob/safeKeyPrefix/safeKey/blobPrefix/blobID
nThen(function (w) {
// make the requisite directory structure using Mkdirp
Fse.mkdirp(Path.dirname(finalPath), w(function (e /*, path */) {
if (e) { // does not throw error if the directory already existed
w.abort();
return void cb(e.code);
}
}));
Fse.mkdirp(Path.dirname(finalOwnPath), w(function (e /*, path */) {
if (e) { // does not throw error if the directory already existed
w.abort();
return void cb(e.code);
}
}));
}).nThen(function (w) {
// make sure the id does not collide with another
tryId(finalPath, w(function (e) {
if (e) {
w.abort();
return void cb(e);
}
}));
}).nThen(function (w) {
// Create the empty file proving ownership
Fs.writeFile(finalOwnPath, '', w(function (e) {
if (e) {
w.abort();
return void cb(e.code);
}
// otherwise it worked...
}));
}).nThen(function (w) {
// move the existing file to its new path
Fse.move(oldPath, finalPath, w(function (e) {
if (e) {
// if there's an error putting the file into its final location...
// ... you should remove the ownership file
Fs.unlink(finalOwnPath, function () {
// but if you can't, it's not catastrophic
// we can clean it up later
});
w.abort();
return void cb(e.code);
}
// otherwise it worked...
}));
}).nThen(function () {
// clean up their session when you're done
// call back with the blob id...
cb(void 0, id);
});
};
// removeBlob
var remove = function (Env, blobId, cb) {
var blobPath = makeBlobPath(Env, blobId);
Fs.unlink(blobPath, cb); // TODO COLDSTORAGE
};
// removeProof
var removeProof = function (Env, safeKey, blobId, cb) {
var proofPath = makeProofPath(Env, safeKey, blobId);
Fs.unlink(proofPath, cb);
};
// isOwnedBy(id, safeKey)
var isOwnedBy = function (Env, safeKey, blobId, cb) {
var proofPath = makeProofPath(Env, safeKey, blobId);
isFile(proofPath, cb);
};
// archiveBlob
var archiveBlob = function (Env, blobId, cb) {
var blobPath = makeBlobPath(Env, blobId);
var archivePath = prependArchive(Env, blobPath);
Fse.move(blobPath, archivePath, { overwrite: true }, cb);
};
var removeArchivedBlob = function (Env, blobId, cb) {
var archivePath = prependArchive(Env, makeBlobPath(Env, blobId));
Fs.unlink(archivePath, cb);
};
// restoreBlob
var restoreBlob = function (Env, blobId, cb) {
var blobPath = makeBlobPath(Env, blobId);
var archivePath = prependArchive(Env, blobPath);
Fse.move(archivePath, blobPath, cb);
};
// archiveProof
var archiveProof = function (Env, safeKey, blobId, cb) {
var proofPath = makeProofPath(Env, safeKey, blobId);
var archivePath = prependArchive(Env, proofPath);
Fse.move(proofPath, archivePath, { overwrite: true }, cb);
};
var removeArchivedProof = function (Env, safeKey, blobId, cb) {
var archivedPath = prependArchive(Env, makeProofPath(Env, safeKey, blobId));
Fs.unlink(archivedPath, cb);
};
// restoreProof
var restoreProof = function (Env, safeKey, blobId, cb) {
var proofPath = makeProofPath(Env, safeKey, blobId);
var archivePath = prependArchive(Env, proofPath);
Fse.move(archivePath, proofPath, cb);
};
var makeWalker = function (n, handleChild, done) {
if (!n || typeof(n) !== 'number' || n < 2) { n = 2; }
var W;
nThen(function (w) {
// this asynchronous bit defers the completion of this block until
// synchronous execution has completed. This means you must create
// the walker and start using it synchronously or else it will call back
// prematurely
setTimeout(w());
W = w;
}).nThen(function () {
done();
});
// do no more than n jobs at a time
var tasks = Semaphore.create(n);
var recurse = function (path) {
tasks.take(function (give) {
var next = give(W());
nThen(function (w) {
// check if the path is a directory...
Fs.stat(path, w(function (err, stats) {
if (err) { return next(); }
if (!stats.isDirectory()) {
w.abort();
return void handleChild(void 0, path, next);
}
// fall through
}));
}).nThen(function () {
// handle directories
Fs.readdir(path, function (err, dir) {
if (err) { return next(); }
// everything is fine and it's a directory...
dir.forEach(function (d) {
recurse(Path.join(path, d));
});
next();
});
});
});
};
return recurse;
};
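// A minimal sketch of the synchronous-start requirement described in the comment
// inside makeWalker: the walker must be fed its root paths in the same synchronous
// tick in which it was created, otherwise the deferred nThen block completes and
// `done` fires before any children have been scheduled. Paths below are illustrative.
var exampleWalk = makeWalker(20, function (err, childPath, next) {
    if (!err) { console.log('file:', childPath); }
    next();
}, function () {
    console.log('finished walking');
});
['./blob/aa', './blob/ab'].forEach(function (dir) {
    exampleWalk(dir); // must be called before returning to the event loop
});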
var listProofs = function (root, handler, cb) {
Fs.readdir(root, function (err, dir) {
if (err) { return void cb(err); }
var walk = makeWalker(20, function (err, path, next) {
// path is the path to a child node on the filesystem
// next handles the next job in a queue
// iterate over proofs
// check for presence of corresponding files
Fs.stat(path, function (err, stats) {
if (err) {
return void handler(err, void 0, next);
}
var parsed = parseProofPath(path);
handler(void 0, {
path: path,
blobId: parsed.blobId,
safeKey: parsed.safeKey,
atime: stats.atime,
ctime: stats.ctime,
mtime: stats.mtime,
}, next);
});
}, function () {
// called when there are no more directories or children to process
cb();
});
dir.forEach(function (d) {
// ignore directories that aren't 3 characters long...
if (d.length !== 3) { return; }
walk(Path.join(root, d));
});
});
};
var listBlobs = function (root, handler, cb) {
// iterate over files
Fs.readdir(root, function (err, dir) {
if (err) { return void cb(err); }
var walk = makeWalker(20, function (err, path, next) {
Fs.stat(path, function (err, stats) {
if (err) {
return void handler(err, void 0, next);
}
handler(void 0, {
blobId: Path.basename(path),
atime: stats.atime,
ctime: stats.ctime,
mtime: stats.mtime,
}, next);
});
}, function () {
cb();
});
dir.forEach(function (d) {
if (d.length !== 2) { return; }
walk(Path.join(root, d));
});
});
};
BlobStore.create = function (config, _cb) {
var cb = Util.once(Util.mkAsync(_cb));
if (typeof(config.getSession) !== 'function') {
return void cb("getSession method required");
}
var Env = {
blobPath: config.blobPath || './blob',
blobStagingPath: config.blobStagingPath || './blobstage',
archivePath: config.archivePath || './data/archive',
getSession: config.getSession,
};
nThen(function (w) {
var CB = Util.both(w.abort, cb);
Fse.mkdirp(Env.blobPath, w(function (e) {
if (e) { CB(e); }
}));
Fse.mkdirp(Env.blobStagingPath, w(function (e) {
if (e) { CB(e); }
}));
Fse.mkdirp(Path.join(Env.archivePath, Env.blobPath), w(function (e) {
if (e) { CB(e); }
}));
}).nThen(function () {
var methods = {
isFileId: isValidId,
status: function (safeKey, _cb) {
// TODO check if the final destination is a file
// because otherwise two people can try to upload to the same location
// and one will fail, invalidating their hard work
var cb = Util.once(Util.mkAsync(_cb));
if (!isValidSafeKey(safeKey)) { return void cb('INVALID_SAFEKEY'); }
isFile(makeStagePath(Env, safeKey), cb);
},
upload: function (safeKey, content, _cb) {
var cb = Util.once(Util.mkAsync(_cb));
if (!isValidSafeKey(safeKey)) { return void cb('INVALID_SAFEKEY'); }
upload(Env, safeKey, content, cb);
},
cancel: function (safeKey, fileSize, _cb) {
var cb = Util.once(Util.mkAsync(_cb));
if (!isValidSafeKey(safeKey)) { return void cb('INVALID_SAFEKEY'); }
if (typeof(fileSize) !== 'number' || isNaN(fileSize) || fileSize <= 0) { return void cb("INVALID_FILESIZE"); }
upload_cancel(Env, safeKey, fileSize, cb);
},
isOwnedBy: function (safeKey, blobId, _cb) {
var cb = Util.once(Util.mkAsync(_cb));
if (!isValidSafeKey(safeKey)) { return void cb('INVALID_SAFEKEY'); }
isOwnedBy(Env, safeKey, blobId, cb);
},
remove: {
blob: function (blobId, _cb) {
var cb = Util.once(Util.mkAsync(_cb));
if (!isValidId(blobId)) { return void cb("INVALID_ID"); }
remove(Env, blobId, cb);
},
proof: function (safeKey, blobId, _cb) {
var cb = Util.once(Util.mkAsync(_cb));
if (!isValidSafeKey(safeKey)) { return void cb('INVALID_SAFEKEY'); }
if (!isValidId(blobId)) { return void cb("INVALID_ID"); }
removeProof(Env, safeKey, blobId, cb);
},
archived: {
blob: function (blobId, _cb) {
var cb = Util.once(Util.mkAsync(_cb));
if (!isValidId(blobId)) { return void cb("INVALID_ID"); }
removeArchivedBlob(Env, blobId, cb);
},
proof: function (safeKey, blobId, _cb) {
var cb = Util.once(Util.mkAsync(_cb));
if (!isValidSafeKey(safeKey)) { return void cb('INVALID_SAFEKEY'); }
if (!isValidId(blobId)) { return void cb("INVALID_ID"); }
removeArchivedProof(Env, safeKey, blobId, cb);
},
},
},
archive: {
blob: function (blobId, _cb) {
var cb = Util.once(Util.mkAsync(_cb));
if (!isValidId(blobId)) { return void cb("INVALID_ID"); }
archiveBlob(Env, blobId, cb);
},
proof: function (safeKey, blobId, _cb) {
var cb = Util.once(Util.mkAsync(_cb));
if (!isValidSafeKey(safeKey)) { return void cb('INVALID_SAFEKEY'); }
if (!isValidId(blobId)) { return void cb("INVALID_ID"); }
archiveProof(Env, safeKey, blobId, cb);
},
},
restore: {
blob: function (blobId, _cb) {
var cb = Util.once(Util.mkAsync(_cb));
if (!isValidId(blobId)) { return void cb("INVALID_ID"); }
restoreBlob(Env, blobId, cb);
},
proof: function (safeKey, blobId, _cb) {
var cb = Util.once(Util.mkAsync(_cb));
if (!isValidSafeKey(safeKey)) { return void cb('INVALID_SAFEKEY'); }
if (!isValidId(blobId)) { return void cb("INVALID_ID"); }
restoreProof(Env, safeKey, blobId, cb);
},
},
complete: function (safeKey, id, _cb) {
var cb = Util.once(Util.mkAsync(_cb));
if (!isValidSafeKey(safeKey)) { return void cb('INVALID_SAFEKEY'); }
if (!isValidId(id)) { return void cb("INVALID_ID"); }
upload_complete(Env, safeKey, id, cb);
},
completeOwned: function (safeKey, id, _cb) {
var cb = Util.once(Util.mkAsync(_cb));
if (!isValidSafeKey(safeKey)) { return void cb('INVALID_SAFEKEY'); }
if (!isValidId(id)) { return void cb("INVALID_ID"); }
owned_upload_complete(Env, safeKey, id, cb);
},
size: function (id, _cb) {
var cb = Util.once(Util.mkAsync(_cb));
if (!isValidId(id)) { return void cb("INVALID_ID"); }
getUploadSize(Env, id, cb);
},
list: {
blobs: function (handler, _cb) {
var cb = Util.once(Util.mkAsync(_cb));
listBlobs(Env.blobPath, handler, cb);
},
proofs: function (handler, _cb) {
var cb = Util.once(Util.mkAsync(_cb));
listProofs(Env.blobPath, handler, cb);
},
archived: {
proofs: function (handler, _cb) {
var cb = Util.once(Util.mkAsync(_cb));
listProofs(prependArchive(Env, Env.blobPath), handler, cb);
},
blobs: function (handler, _cb) {
var cb = Util.once(Util.mkAsync(_cb));
listBlobs(prependArchive(Env, Env.blobPath), handler, cb);
},
}
},
};
cb(void 0, methods);
});
};
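// A minimal usage sketch for the BlobStore API above, assuming the module is
// loaded from lib/storage/blob.js. The paths, the in-memory session table and
// the 48-character hex blob id are illustrative; getSession must return a
// per-user session object (it is used to track upload state).
var ExampleBlob = require("./lib/storage/blob");
var exampleSessions = {};
ExampleBlob.create({
    blobPath: './blob',
    blobStagingPath: './blobstage',
    archivePath: './data/archive',
    getSession: function (safeKey) {
        return (exampleSessions[safeKey] = exampleSessions[safeKey] || {});
    },
}, function (err, store) {
    if (err) { return void console.error(err); }
    // query the size of an uploaded blob by its id
    store.size('0123456789abcdef0123456789abcdef0123456789abcdef', function (err, bytes) {
        console.log(err || bytes);
    });
});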

1260
lib/storage/file.js
File diff suppressed because it is too large

397
lib/storage/tasks.js

@ -0,0 +1,397 @@
var Fs = require("fs");
var Fse = require("fs-extra");
var Path = require("path");
var nacl = require("tweetnacl/nacl-fast");
var nThen = require("nthen");
var Tasks = module.exports;
var tryParse = function (s) {
try { return JSON.parse(s); }
catch (e) { return null; }
};
var encode = function (time, command, args) {
if (typeof(time) !== 'number') { return null; }
if (typeof(command) !== 'string') { return null; }
if (!Array.isArray(args)) { return [time, command]; }
return [time, command].concat(args);
};
/*
var randomId = function () {
var bytes = Array.prototype.slice.call(nacl.randomBytes(16));
return bytes.map(function (b) {
var n = Number(b & 0xff).toString(16);
return n.length === 1? '0' + n: n;
}).join('');
};
var mkPath = function (env, id) {
return Path.join(env.root, id.slice(0, 2), id) + '.ndjson';
};
*/
// make a new folder every MODULUS ms
var MODULUS = 1000 * 60 * 60 * 24; // one day
var moduloTime = function (d) {
return d - (d % MODULUS);
};
var makeDirectoryId = function (d) {
return '' + moduloTime(d);
};
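// For illustration: with MODULUS set to one day, every timestamp that falls
// within the same UTC day maps to the same directory, e.g.
//   moduloTime(1586354730000)      === 1586304000000
//   makeDirectoryId(1586354730000) === '1586304000000'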
var write = function (env, task, cb) {
var str = JSON.stringify(task) + '\n';
var id = nacl.util.encodeBase64(nacl.hash(nacl.util.decodeUTF8(str))).replace(/\//g, '-');
var dir = makeDirectoryId(task[0]);
var path = Path.join(env.root, dir);
nThen(function (w) {
// create the parent directory if it does not exist
Fse.mkdirp(path, 0x1ff, w(function (err) {
if (err) {
w.abort();
return void cb(err);
}
}));
}).nThen(function () {
// write the file to the path
var fullPath = Path.join(path, id + '.ndjson');
// the file ids are based on the hash of the file contents to be written
// as such, writing an exact task a second time will overwrite the first with the same contents
// this shouldn't be a problem
Fs.writeFile(fullPath, str, function (e) {
if (e) {
env.log.error("TASK_WRITE_FAILURE", {
error: e,
path: fullPath,
});
return void cb(e);
}
env.log.info("SUCCESSFUL_WRITE", {
path: fullPath,
});
cb();
});
});
};
var remove = function (env, path, cb) {
// FIXME COLDSTORAGE?
Fs.unlink(path, cb);
};
var removeDirectory = function (env, path, cb) {
Fs.rmdir(path, cb);
};
var list = Tasks.list = function (env, cb, migration) {
var rootDirs;
nThen(function (w) {
// read the root directory
Fs.readdir(env.root, w(function (e, list) {
if (e) {
env.log.error("TASK_ROOT_DIR", {
root: env.root,
error: e,
});
w.abort();
return void cb(e);
}
if (list.length === 0) {
w.abort();
return void cb(void 0, []);
}
rootDirs = list;
}));
}).nThen(function () {
// schedule the nested directories for exploration
// return a list of paths to tasks
var queue = nThen(function () {});
var allPaths = [];
var currentWindow = moduloTime(+new Date() + MODULUS);
// We prioritize a small footprint over speed, so we
// iterate over directories in serial rather than parallel
rootDirs.forEach(function (dir) {
// if a directory is two characters, it's the old format
// otherwise, it indicates when the file is set to expire
// so we can ignore directories which are clearly in the future
var dirTime;
if (migration) {
// this block handles migrations. ignore new formats
if (dir.length !== 2) {
return;
}
} else {
// not in migration mode, check if it's a new format
if (dir.length >= 2) {
// might be the new format.
// check its time to see if it should be skipped
dirTime = parseInt(dir);
if (!isNaN(dirTime) && dirTime >= currentWindow) {
return;
}
}
}
queue.nThen(function (w) {
var subPath = Path.join(env.root, dir);
Fs.readdir(subPath, w(function (e, paths) {
if (e) {
env.log.error("TASKS_INVALID_SUBDIR", {
path: subPath,
error: e,
});
return;
}
if (paths.length === 0) {
removeDirectory(env, subPath, function (err) {
if (err) {
env.log.error('TASKS_REMOVE_EMPTY_DIRECTORY', {
error: err,
path: subPath,
});
}
});
}
// concat in place
Array.prototype.push.apply(allPaths, paths.map(function (p) {
return Path.join(subPath, p);
}));
}));
});
});
queue.nThen(function () {
cb(void 0, allPaths);
});
});
};
var read = function (env, filePath, cb) {
Fs.readFile(filePath, 'utf8', function (e, str) {
if (e) { return void cb(e); }
var task = tryParse(str);
if (!Array.isArray(task) || task.length < 2) {
env.log.error("INVALID_TASK", {
path: filePath,
task: task,
});
return cb(new Error('INVALID_TASK'));
}
cb(void 0, task);
});
};
var expire = function (env, task, cb) {
// TODO magic numbers, maybe turn task parsing into a function
// and also maybe just encode tasks in a better format to start...
var Log = env.log;
var args = task.slice(2);
Log.info('ARCHIVAL_SCHEDULED_EXPIRATION', {
task: task,
});
env.store.archiveChannel(args[0], function (err) {
if (err) {
Log.error('ARCHIVE_SCHEDULED_EXPIRATION_ERROR', {
task: task,
error: err,
});
}
cb();
});
};
var run = Tasks.run = function (env, path, cb) {
var CURRENT = +new Date();
var Log = env.log;
var task, time, command, args;
nThen(function (w) {
read(env, path, w(function (err, _task) {
if (err) {
w.abort();
// there was a file but it wasn't valid?
return void cb(err);
}
task = _task;
time = task[0];
if (time > CURRENT) {
w.abort();
return cb();
}
command = task[1];
args = task.slice(2);
}));
}).nThen(function (w) {
switch (command) {
case 'EXPIRE':
return void expire(env, task, w());
default:
Log.warn("TASKS_UNKNOWN_COMMAND", task);
}
}).nThen(function () {
// remove the task file...
remove(env, path, function (err) {
if (err) {
Log.error('TASKS_RECORD_REMOVAL', {
path: path,
err: err,
});
}
cb();
});
});
};
var runAll = function (env, cb) {
// check if already running and bail out if so
if (env.running) {
return void cb("TASK_CONCURRENCY");
}
// if not, set a flag to block concurrency and proceed
env.running = true;
var paths;
nThen(function (w) {
list(env, w(function (err, _paths) {
if (err) {
w.abort();
env.running = false;
return void cb(err);
}
paths = _paths;
}));
}).nThen(function (w) {
var done = w();
var nt = nThen(function () {});
paths.forEach(function (path) {
nt = nt.nThen(function (w) {
run(env, path, w(function (err) {
if (err) {
// Any errors are already logged in 'run'
// the admin will need to review the logs and clean up
}
}));
});
});
nt = nt.nThen(function () {
done();
});
}).nThen(function (/*w*/) {
env.running = false;
cb();
});
};
var migrate = function (env, cb) {
// list every task
list(env, function (err, paths) {
if (err) {
return void cb(err);
}
var nt = nThen(function () {});
paths.forEach(function (path) {
var bypass;
var task;
nt = nt.nThen(function (w) {
// read
read(env, path, w(function (err, _task) {
if (err) {
bypass = true;
env.log.error("TASK_MIGRATION_READ", {
error: err,
path: path,
});
return;
}
task = _task;
}));
}).nThen(function (w) {
if (bypass) { return; }
// rewrite in new format
write(env, task, w(function (err) {
if (err) {
bypass = true;
env.log.error("TASK_MIGRATION_WRITE", {
error: err,
task: task,
});
}
}));
}).nThen(function (w) {
if (bypass) { return; }
// remove
remove(env, path, w(function (err) {
if (err) {
env.log.error("TASK_MIGRATION_REMOVE", {
error: err,
path: path,
});
}
}));
});
});
nt = nt.nThen(function () {
cb();
});
}, true);
};
Tasks.create = function (config, cb) {
if (!config.store) { throw new Error("E_STORE_REQUIRED"); }
if (!config.log) { throw new Error("E_LOG_REQUIRED"); }
var env = {
root: config.taskPath || './tasks',
log: config.log,
store: config.store,
};
// make sure the path exists...
Fse.mkdirp(env.root, 0x1ff, function (err) {
if (err) { return void cb(err); }
cb(void 0, {
write: function (time, command, args, cb) {
var task = encode(time, command, args);
write(env, task, cb);
},
list: function (cb) {
list(env, cb);
},
remove: function (id, cb) {
remove(env, id, cb);
},
run: function (id, cb) {
run(env, id, cb);
},
runAll: function (cb) {
runAll(env, cb);
},
migrate: function (cb) {
migrate(env, cb);
},
});
});
};
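// A minimal usage sketch for the Tasks API above, assuming the module is loaded
// from lib/storage/tasks.js. The log and store objects are stand-ins for the
// real logger and storage module; the channel id is illustrative.
var ExampleTasks = require("./lib/storage/tasks");
ExampleTasks.create({
    taskPath: './tasks',
    log: { info: console.log, warn: console.warn, error: console.error },
    store: {
        archiveChannel: function (channel, cb) {
            console.log('archiving', channel);
            cb();
        },
    },
}, function (err, tasks) {
    if (err) { return void console.error(err); }
    // schedule a channel to be archived one hour from now...
    tasks.write(+new Date() + (1000 * 60 * 60), 'EXPIRE', ['someChannelId'], function (err) {
        if (err) { return void console.error(err); }
        // ...and sweep for any tasks whose time has already passed
        tasks.runAll(function (err) {
            if (err) { console.error(err); }
        });
    });
});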

84
lib/stream-file.js

@ -0,0 +1,84 @@
/* jshint esversion: 6 */
/* global Buffer */
const ToPull = require('stream-to-pull-stream');
const Pull = require('pull-stream');
const Stream = module.exports;
// transform a stream of arbitrarily divided data
// into a stream of buffers divided by newlines in the source stream
// TODO see if we could improve performance by using libnewline
const NEWLINE_CHR = ('\n').charCodeAt(0);
const mkBufferSplit = () => {
let remainder = null;
return Pull((read) => {
return (abort, cb) => {
read(abort, function (end, data) {
if (end) {
if (data) { console.log("mkBufferSplit() Data at the end"); }
cb(end, remainder ? [remainder, data] : [data]);
remainder = null;
return;
}
const queue = [];
for (;;) {
const offset = data.indexOf(NEWLINE_CHR);
if (offset < 0) {
remainder = remainder ? Buffer.concat([remainder, data]) : data;
break;
}
let subArray = data.slice(0, offset);
if (remainder) {
subArray = Buffer.concat([remainder, subArray]);
remainder = null;
}
queue.push(subArray);
data = data.slice(offset + 1);
}
cb(end, queue);
});
};
}, Pull.flatten());
};
// return a streaming function which transforms buffers into objects
// containing the buffer and the offset from the start of the stream
const mkOffsetCounter = () => {
let offset = 0;
return Pull.map((buff) => {
const out = { offset: offset, buff: buff };
// +1 for the eaten newline
offset += buff.length + 1;
return out;
});
};
// readFileBin asynchronously iterates over the messages in a channel log
// the handler for each message must call back to read more, which should mean
// that this function has a lower memory profile than our classic method
// of reading logs line by line.
// it also allows the handler to abort reading at any time
Stream.readFileBin = (stream, msgHandler, cb) => {
//const stream = Fs.createReadStream(path, { start: start });
let keepReading = true;
Pull(
ToPull.read(stream),
mkBufferSplit(),
mkOffsetCounter(),
Pull.asyncMap((data, moreCb) => {
msgHandler(data, moreCb, () => {
try {
stream.close();
} catch (err) {
console.error("READ_FILE_BIN_ERR", err);
}
keepReading = false;
moreCb();
});
}),
Pull.drain(() => (keepReading), (err) => {
cb((keepReading) ? err : undefined);
})
);
};
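// A minimal usage sketch for readFileBin, assuming the module is loaded from
// lib/stream-file.js and pointed at an ndjson channel log (the path below is
// illustrative). Nothing further is read until the handler calls readMore,
// which is what keeps the memory profile low.
const ExampleFs = require('fs');
const ExampleStream = require('./lib/stream-file');
const exampleLog = ExampleFs.createReadStream('./datastore/ab/abcdef.ndjson');
ExampleStream.readFileBin(exampleLog, function (msgObj, readMore, abort) {
    console.log(msgObj.offset, msgObj.buff.toString('utf8'));
    if (msgObj.offset > 1024 * 1024) { return void abort(); } // stop early if desired
    readMore();
}, function (err) {
    if (err) { console.error(err); }
});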

576
lib/workers/db-worker.js

@ -0,0 +1,576 @@
/* jshint esversion: 6 */
/* global process */
const HK = require("../hk-util");
const Store = require("../storage/file");
const BlobStore = require("../storage/blob");
const Util = require("../common-util");
const nThen = require("nthen");
const Meta = require("../metadata");
const Pins = require("../pins");
const Core = require("../commands/core");
const Saferphore = require("saferphore");
const Logger = require("../log");
const Tasks = require("../storage/tasks");
const Nacl = require('tweetnacl/nacl-fast');
const Env = {
Log: {},
};
// support the usual log API but pass it to the main process
Logger.levels.forEach(function (level) {
Env.Log[level] = function (label, info) {
process.send({
log: level,
label: label,
info: info,
});
};
});
var ready = false;
var store;
var pinStore;
var blobStore;
const init = function (config, _cb) {
const cb = Util.once(Util.mkAsync(_cb));
if (!config) {
return void cb('E_INVALID_CONFIG');
}
nThen(function (w) {
Store.create(config, w(function (err, _store) {
if (err) {
w.abort();
return void cb(err);
}
store = _store;
}));
Store.create({
filePath: config.pinPath,
}, w(function (err, _pinStore) {
if (err) {
w.abort();
return void cb(err);
}
pinStore = _pinStore;
}));
BlobStore.create({
blobPath: config.blobPath,
blobStagingPath: config.blobStagingPath,
archivePath: config.archivePath,
getSession: function () {},
}, w(function (err, blob) {
if (err) {
w.abort();
return void cb(err);
}
blobStore = blob;
}));
}).nThen(function (w) {
Tasks.create({
log: Env.Log,
taskPath: config.taskPath,
store: store,
}, w(function (err, tasks) {
if (err) {
w.abort();
return void cb(err);
}
Env.tasks = tasks;
}));
}).nThen(function () {
cb();
});
};
/* computeIndex
can call back with an error or a computed index which includes:
* cpIndex:
* array including any checkpoints pushed within the last 100 messages
* processed by 'sliceCpIndex(cpIndex, line)'
* offsetByHash:
* a map containing message offsets by their hash
* this is for every message in history, so it could be very large...
* except we remove offsets from the map if they occur before the oldest relevant checkpoint
* size: in bytes
* metadata:
* validationKey
* expiration time
* owners
* ??? (anything else we might add in the future)
* line
* the number of messages in history
* including the initial metadata line, if it exists
*/
const computeIndex = function (data, cb) {
if (!data || !data.channel) {
return void cb('E_NO_CHANNEL');
}
const channelName = data.channel;
const cpIndex = [];
let messageBuf = [];
let i = 0;
const CB = Util.once(cb);
const offsetByHash = {};
let offsetCount = 0;
let size = 0;
nThen(function (w) {
// iterate over all messages in the channel log
// old channels can contain metadata as the first message of the log
// skip over metadata as that is handled elsewhere
// otherwise index important messages in the log
store.readMessagesBin(channelName, 0, (msgObj, readMore) => {
let msg;
// keep an eye out for the metadata line if you haven't already seen it
// but only check for metadata on the first line
if (!i && msgObj.buff.indexOf('{') === 0) {
i++; // always increment the message counter
msg = HK.tryParse(Env, msgObj.buff.toString('utf8'));
if (typeof msg === "undefined") { return readMore(); }
// if the first line parses as valid metadata, skip it;
// metadata is computed separately (see computeMetadata below)
if (HK.isMetadataMessage(msg)) { return readMore(); }
}
i++;
if (msgObj.buff.indexOf('cp|') > -1) {
msg = msg || HK.tryParse(Env, msgObj.buff.toString('utf8'));
if (typeof msg === "undefined") { return readMore(); }
// cache the offsets of checkpoints if they can be parsed
if (msg[2] === 'MSG' && msg[4].indexOf('cp|') === 0) {
cpIndex.push({
offset: msgObj.offset,
line: i
});
// we only want to store messages since the latest checkpoint
// so clear the buffer every time you see a new one
messageBuf = [];
}
} else if (messageBuf.length > 100 && cpIndex.length === 0) {
// take the last 50 messages
messageBuf = messageBuf.slice(-50);
}
// if it's not metadata or a checkpoint then it should be a regular message
// store it in the buffer
messageBuf.push(msgObj);
return readMore();
}, w((err) => {
if (err && err.code !== 'ENOENT') {
w.abort();
return void CB(err);
}
// once indexing is complete you should have a buffer of messages since the latest checkpoint
// or the 50-100 latest messages if the channel is of a type without checkpoints.
// map the 'hash' of each message to its byte offset in the log, to be used for reconnecting clients
messageBuf.forEach((msgObj) => {
const msg = HK.tryParse(Env, msgObj.buff.toString('utf8'));
if (typeof msg === "undefined") { return; }
if (msg[0] === 0 && msg[2] === 'MSG' && typeof(msg[4]) === 'string') {
// msgObj.offset is API guaranteed by our storage module
// it should always be a valid positive integer
offsetByHash[HK.getHash(msg[4])] = msgObj.offset;
offsetCount++;
}
// There is a trailing \n at the end of the file
size = msgObj.offset + msgObj.buff.length + 1;
});
}));
}).nThen(function () {
// return the computed index
CB(null, {
// Only keep the checkpoints included in the last 100 messages
cpIndex: HK.sliceCpIndex(cpIndex, i),
offsetByHash: offsetByHash,
offsets: offsetCount,
size: size,
//metadata: metadata,
line: i
});
});
};
const computeMetadata = function (data, cb) {
const ref = {};
const lineHandler = Meta.createLineHandler(ref, Env.Log.error);
return void store.readChannelMetadata(data.channel, lineHandler, function (err) {
if (err) {
// stream errors?
return void cb(err);
}
cb(void 0, ref.meta);
});
};
/* getOlderHistory
* allows clients to query for all messages until a known hash is read
* stores all messages in history as they are read
* can therefore be very expensive for memory
* should probably be converted to a streaming interface
Used by:
* GET_HISTORY_RANGE
*/
const getOlderHistory = function (data, cb) {
const oldestKnownHash = data.hash;
const channelName = data.channel;
const desiredMessages = data.desiredMessages;
const desiredCheckpoint = data.desiredCheckpoint;
var messages = [];
var found = false;
store.getMessages(channelName, function (msgStr) {
if (found) { return; }
let parsed = HK.tryParse(Env, msgStr);
if (typeof parsed === "undefined") { return; }
// identify classic metadata messages by their inclusion of a channel.
// and don't send metadata, since:
// 1. the user won't be interested in it
// 2. this metadata is potentially incomplete/incorrect
if (HK.isMetadataMessage(parsed)) { return; }
var content = parsed[4];
if (typeof(content) !== 'string') { return; }
var hash = HK.getHash(content);
if (hash === oldestKnownHash) {
found = true;
}
messages.push(parsed);
}, function (err) {
var toSend = [];
if (typeof (desiredMessages) === "number") {
toSend = messages.slice(-desiredMessages);
} else {
let cpCount = 0;
for (var i = messages.length - 1; i >= 0; i--) {
if (/^cp\|/.test(messages[i][4]) && i !== (messages.length - 1)) {
cpCount++;
}
toSend.unshift(messages[i]);
if (cpCount >= desiredCheckpoint) { break; }
}
}
cb(err, toSend);
});
};
const getPinState = function (data, cb) {
const safeKey = data.key;
var ref = {};
var lineHandler = Pins.createLineHandler(ref, Env.Log.error);
// if channels aren't in memory, load them from disk
// TODO replace with readMessagesBin
pinStore.getMessages(safeKey, lineHandler, function () {
cb(void 0, ref.pins); // FIXME no error handling?
});
};
const _getFileSize = function (channel, _cb) {
var cb = Util.once(Util.mkAsync(_cb));
if (!Core.isValidId(channel)) { return void cb('INVALID_CHAN'); }
if (channel.length === 32) {
return void store.getChannelSize(channel, function (e, size) {
if (e) {
if (e.code === 'ENOENT') { return void cb(void 0, 0); }
return void cb(e.code);
}
cb(void 0, size);
});
}
// 'channel' refers to a file, so you need another API
blobStore.size(channel, function (e, size) {
if (typeof(size) === 'undefined') { return void cb(e); }
cb(void 0, size);
});
};
const getFileSize = function (data, cb) {
_getFileSize(data.channel, cb);
};
const _iterateFiles = function (channels, handler, cb) {
if (!Array.isArray(channels)) { return cb('INVALID_LIST'); }
var L = channels.length;
var sem = Saferphore.create(10);
// (channel, next) => { ??? }
var job = function (channel, wait) {
return function (give) {
handler(channel, wait(give()));
};
};
nThen(function (w) {
for (var i = 0; i < L; i++) {
sem.take(job(channels[i], w));
}
}).nThen(function () {
cb();
});
};
const getTotalSize = function (data, cb) {
var bytes = 0;
_iterateFiles(data.channels, function (channel, next) {
_getFileSize(channel, function (err, size) {
if (!err) { bytes += size; }
next();
});
}, function (err) {
if (err) { return cb(err); }
cb(void 0, bytes);
});
};
const getDeletedPads = function (data, cb) {
var absentees = [];
_iterateFiles(data.channels, function (channel, next) {
_getFileSize(channel, function (err, size) {
if (err) { return next(); }
if (size === 0) { absentees.push(channel); }
next();
});
}, function (err) {
if (err) { return void cb(err); }
cb(void 0, absentees);
});
};
const getMultipleFileSize = function (data, cb) {
const counts = {};
_iterateFiles(data.channels, function (channel, next) {
_getFileSize(channel, function (err, size) {
counts[channel] = err? 0: size;
next();
});
}, function (err) {
if (err) {
return void cb(err);
}
cb(void 0, counts);
});
};
const getHashOffset = function (data, cb) {
const channelName = data.channel;
const lastKnownHash = data.hash;
if (typeof(lastKnownHash) !== 'string') { return void cb("INVALID_HASH"); }
var offset = -1;
store.readMessagesBin(channelName, 0, (msgObj, readMore, abort) => {
// tryParse returns a parsed message or undefined
const msg = HK.tryParse(Env, msgObj.buff.toString('utf8'));
// if it was undefined then go onto the next message
if (typeof msg === "undefined") { return readMore(); }
if (typeof(msg[4]) !== 'string' || lastKnownHash !== HK.getHash(msg[4])) {
return void readMore();
}
offset = msgObj.offset;
abort();
}, function (err) {
if (err) { return void cb(err); }
cb(void 0, offset);
});
};
const removeOwnedBlob = function (data, cb) {
const blobId = data.blobId;
const safeKey = data.safeKey;
nThen(function (w) {
// check if you have permissions
blobStore.isOwnedBy(safeKey, blobId, w(function (err, owned) {
if (err || !owned) {
w.abort();
return void cb("INSUFFICIENT_PERMISSIONS");
}
}));
}).nThen(function (w) {
// remove the blob
blobStore.archive.blob(blobId, w(function (err) {
Env.Log.info('ARCHIVAL_OWNED_FILE_BY_OWNER_RPC', {
safeKey: safeKey,
blobId: blobId,
status: err? String(err): 'SUCCESS',
});
if (err) {
w.abort();
return void cb(err);
}
}));
}).nThen(function () {
// archive the proof
blobStore.archive.proof(safeKey, blobId, function (err) {
Env.Log.info("ARCHIVAL_PROOF_REMOVAL_BY_OWNER_RPC", {
safeKey: safeKey,
blobId: blobId,
status: err? String(err): 'SUCCESS',
});
if (err) {
return void cb("E_PROOF_REMOVAL");
}
cb(void 0, 'OK');
});
});
};
const runTasks = function (data, cb) {
Env.tasks.runAll(cb);
};
const writeTask = function (data, cb) {
Env.tasks.write(data.time, data.task_command, data.args, cb);
};
const COMMANDS = {
COMPUTE_INDEX: computeIndex,
COMPUTE_METADATA: computeMetadata,
GET_OLDER_HISTORY: getOlderHistory,
GET_PIN_STATE: getPinState,
GET_FILE_SIZE: getFileSize,
GET_TOTAL_SIZE: getTotalSize,
GET_DELETED_PADS: getDeletedPads,
GET_MULTIPLE_FILE_SIZE: getMultipleFileSize,
GET_HASH_OFFSET: getHashOffset,
REMOVE_OWNED_BLOB: removeOwnedBlob,
RUN_TASKS: runTasks,
WRITE_TASK: writeTask,
};
COMMANDS.INLINE = function (data, cb) {
var signedMsg;
try {
signedMsg = Nacl.util.decodeBase64(data.msg);
} catch (e) {
return void cb('E_BAD_MESSAGE');
}
var validateKey;
try {
validateKey = Nacl.util.decodeBase64(data.key);
} catch (e) {
return void cb("E_BADKEY");
}
// validate the message
const validated = Nacl.sign.open(signedMsg, validateKey);
if (!validated) {
return void cb("FAILED");
}
cb();
};
const checkDetachedSignature = function (signedMsg, signature, publicKey) {
if (!(signedMsg && publicKey)) { throw new Error("INVALID_ARGS"); }
var signedBuffer;
var pubBuffer;
var signatureBuffer;
try {
signedBuffer = Nacl.util.decodeUTF8(signedMsg);
} catch (e) {
throw new Error("INVALID_SIGNED_BUFFER");
}
try {
pubBuffer = Nacl.util.decodeBase64(publicKey);
} catch (e) {
throw new Error("INVALID_PUBLIC_KEY");
}
try {
signatureBuffer = Nacl.util.decodeBase64(signature);
} catch (e) {
throw new Error("INVALID_SIGNATURE");
}
if (pubBuffer.length !== 32) {
throw new Error("INVALID_PUBLIC_KEY_LENGTH");
}
if (signatureBuffer.length !== 64) {
throw new Error("INVALID_SIGNATURE_LENGTH");
}
if (Nacl.sign.detached.verify(signedBuffer, signatureBuffer, pubBuffer) !== true) {
throw new Error("FAILED");
}
};
COMMANDS.DETACHED = function (data, cb) {
try {
checkDetachedSignature(data.msg, data.sig, data.key);
} catch (err) {
return void cb(err && err.message);
}
cb();
};
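// A minimal sketch of input that the DETACHED command above will accept, using
// the same tweetnacl build required at the top of this file. The key pair is
// generated on the spot purely for illustration.
const exampleKeys = Nacl.sign.keyPair();
const exampleMsg = 'a signed payload';
const exampleSig = Nacl.sign.detached(Nacl.util.decodeUTF8(exampleMsg), exampleKeys.secretKey);
COMMANDS.DETACHED({
    msg: exampleMsg,
    sig: Nacl.util.encodeBase64(exampleSig),
    key: Nacl.util.encodeBase64(exampleKeys.publicKey),
}, function (err) {
    // err is undefined when the detached signature verifies
    if (err) { console.error(err); }
});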
COMMANDS.HASH_CHANNEL_LIST = function (data, cb) {
var channels = data.channels;
if (!Array.isArray(channels)) { return void cb('INVALID_CHANNEL_LIST'); }
var uniques = [];
channels.forEach(function (a) {
if (uniques.indexOf(a) === -1) { uniques.push(a); }
});
uniques.sort();
var hash = Nacl.util.encodeBase64(Nacl.hash(Nacl
.util.decodeUTF8(JSON.stringify(uniques))));
cb(void 0, hash);
};
process.on('message', function (data) {
if (!data || !data.txid || !data.pid) {
return void process.send({
error:'E_INVAL',
data: data,
});
}
const cb = function (err, value) {
process.send({
error: err,
txid: data.txid,
pid: data.pid,
value: value,
});
};
if (!ready) {
return void init(data.config, function (err) {
if (err) { return void cb(err); }
ready = true;
cb();
});
}
const command = COMMANDS[data.command];
if (typeof(command) !== 'function') {
return void cb("E_BAD_COMMAND");
}
command(data, cb);
});
process.on('uncaughtException', function (err) {
console.error('[%s] UNCAUGHT EXCEPTION IN DB WORKER', new Date());
console.error(err);
console.error("TERMINATING");
process.exit(1);
});
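// For illustration, the IPC exchange handled above looks roughly like this
// (txids and values are made up; the first message carries the config and
// initializes the worker, later ones carry commands):
//   parent -> worker: { pid: 1234, txid: 'abc123', config: { taskPath: './tasks', blobPath: './blob', ... } }
//   worker -> parent: { error: undefined, txid: 'abc123', pid: 1234, value: undefined }
//   parent -> worker: { pid: 1234, txid: 'def456', command: 'GET_FILE_SIZE', channel: '<channel id>' }
//   worker -> parent: { error: undefined, txid: 'def456', pid: 1234, value: 4096 }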

367
lib/workers/index.js

@ -0,0 +1,367 @@
/* jshint esversion: 6 */
/* global process */
const Util = require("../common-util");
const nThen = require('nthen');
const OS = require("os");
const { fork } = require('child_process');
const Workers = module.exports;
const PID = process.pid;
const DB_PATH = 'lib/workers/db-worker';
const MAX_JOBS = 16;
Workers.initialize = function (Env, config, _cb) {
var cb = Util.once(Util.mkAsync(_cb));
const workers = [];
const response = Util.response(function (errLabel, info) {
Env.Log.error('HK_DB_WORKER__' + errLabel, info);
});
const Log = Env.Log;
const handleLog = function (level, label, info) {
if (typeof(Log[level]) !== 'function') { return; }
Log[level](label, info);
};
var isWorker = function (value) {
return value && value.worker && typeof(value.worker.send) === 'function';
};
// pick ids that aren't already in use...
const guid = function () {
var id = Util.uid();
return response.expected(id)? guid(): id;
};
var workerOffset = -1;
var queue = [];
var getAvailableWorkerIndex = function () {
// If there is already a backlog of tasks you can avoid some work
// by going to the end of the line
if (queue.length) { return -1; }
var L = workers.length;
if (L === 0) {
Log.error('NO_WORKERS_AVAILABLE', {
queue: queue.length,
});
return -1;
}
// cycle through the workers once
// start from a different offset each time
// return -1 if none are available
workerOffset = (workerOffset + 1) % L;
var temp;
for (let i = 0; i < L; i++) {
temp = (workerOffset + i) % L;
/* I'd like for this condition to be more efficient
(`Object.keys` is sub-optimal) but I found some bugs in my initial
implementation stemming from a task counter variable going out-of-sync
with reality when a worker crashed and its tasks were re-assigned to
its substitute. I'm sure it can be done correctly and efficiently,
but this is a relatively easy way to make sure it's always up to date.
We'll see how it performs in practice before optimizing.
*/
if (workers[temp] && Object.keys(workers[temp]).length < MAX_JOBS) {
return temp;
}
}
return -1;
};
var sendCommand = function (msg, _cb) {
var index = getAvailableWorkerIndex();
var state = workers[index];
// if there is no worker available:
if (!isWorker(state)) {
// queue the message for when one becomes available
queue.push({
msg: msg,
cb: _cb,
});
return;
}
var cb = Util.once(Util.mkAsync(_cb));
const txid = guid();
msg.txid = txid;
msg.pid = PID;
// track which worker is doing which jobs
state.tasks[txid] = msg;
response.expect(txid, cb, 60000);
state.worker.send(msg);
};
var handleResponse = function (state, res) {
if (!res) { return; }
// handle log messages before checking if it was addressed to your PID
// it might still be useful to know what happened inside an orphaned worker
if (res.log) {
return void handleLog(res.log, res.label, res.info);
}
// but don't bother handling things addressed to other processes
// since it's basically guaranteed not to work
if (res.pid !== PID) {
return void Log.error("WRONG_PID", res);
}
if (!res.txid) { return; }
response.handle(res.txid, [res.error, res.value]);
delete state.tasks[res.txid];
if (!queue.length) { return; }
var nextMsg = queue.shift();
/* `nextMsg` was at the top of the queue.
We know that a job just finished and all of this code
is synchronous, so calling `sendCommand` should take the worker
which was just freed up. This is somewhat fragile though, so
be careful if you want to modify this block. The risk is that
we take something that was at the top of the queue and push it
to the back because the following msg took its place. OR, in an
even worse scenario, we cycle through the queue but don't run anything.
*/
sendCommand(nextMsg.msg, nextMsg.cb);
};
const initWorker = function (worker, cb) {
const txid = guid();
const state = {
worker: worker,
tasks: {},
};
response.expect(txid, function (err) {
if (err) { return void cb(err); }
workers.push(state);
cb(void 0, state);
}, 15000);
worker.send({
pid: PID,
txid: txid,
config: config,
});
worker.on('message', function (res) {
handleResponse(state, res);
});
var substituteWorker = Util.once(function () {
Env.Log.info("SUBSTITUTE_DB_WORKER", '');
var idx = workers.indexOf(state);
if (idx !== -1) {
workers.splice(idx, 1);
}
Object.keys(state.tasks).forEach(function (txid) {
const cb = response.expectation(txid);
if (typeof(cb) !== 'function') { return; }
const task = state.tasks[txid];
if (!task || !task.msg) { return; }
response.clear(txid);
Log.info('DB_WORKER_RESEND', task.msg);
sendCommand(task.msg, cb);
});
initWorker(fork(DB_PATH), function (err) {
if (err) {
throw new Error(err);
}
});
});
worker.on('exit', substituteWorker);
worker.on('close', substituteWorker);
worker.on('error', function (err) {
substituteWorker();
Env.Log.error("DB_WORKER_ERROR", {
error: err,
});
});
};
nThen(function (w) {
const max = config.maxWorkers;
var limit;
if (typeof(max) !== 'undefined') {
// the admin provided a limit on the number of workers
if (typeof(max) === 'number' && !isNaN(max)) {
if (max < 1) {
Log.info("INSUFFICIENT_MAX_WORKERS", max);
limit = 1;
} else {
limit = max;
}
} else {
Log.error("INVALID_MAX_WORKERS", '[' + max + ']');
}
}
var logged;
OS.cpus().forEach(function (cpu, index) {
if (limit && index >= limit) {
if (!logged) {
logged = true;
Log.info('WORKER_LIMIT', "(Opting not to use available CPUs beyond " + index + ')');
}
return;
}
initWorker(fork(DB_PATH), w(function (err) {
if (!err) { return; }
w.abort();
return void cb(err);
}));
});
}).nThen(function () {
Env.computeIndex = function (Env, channel, cb) {
Env.store.getWeakLock(channel, function (next) {
sendCommand({
channel: channel,
command: 'COMPUTE_INDEX',
}, function (err, index) {
next();
cb(err, index);
});
});
};
Env.computeMetadata = function (channel, cb) {
Env.store.getWeakLock(channel, function (next) {
sendCommand({
channel: channel,
command: 'COMPUTE_METADATA',
}, function (err, metadata) {
next();
cb(err, metadata);
});
});
};
Env.getOlderHistory = function (channel, oldestKnownHash, desiredMessages, desiredCheckpoint, cb) {
Env.store.getWeakLock(channel, function (next) {
sendCommand({
channel: channel,
command: "GET_OLDER_HISTORY",
hash: oldestKnownHash,
desiredMessages: desiredMessages,
desiredCheckpoint: desiredCheckpoint,
}, Util.both(next, cb));
});
};
Env.getPinState = function (safeKey, cb) {
Env.pinStore.getWeakLock(safeKey, function (next) {
sendCommand({
key: safeKey,
command: 'GET_PIN_STATE',
}, Util.both(next, cb));
});
};
Env.getFileSize = function (channel, cb) {
sendCommand({
command: 'GET_FILE_SIZE',
channel: channel,
}, cb);
};
Env.getDeletedPads = function (channels, cb) {
sendCommand({
command: "GET_DELETED_PADS",
channels: channels,
}, cb);
};
Env.getTotalSize = function (channels, cb) {
// we could take out locks for all of these channels,
// but it's OK if the size is slightly off
sendCommand({
command: 'GET_TOTAL_SIZE',
channels: channels,
}, cb);
};
Env.getMultipleFileSize = function (channels, cb) {
sendCommand({
command: "GET_MULTIPLE_FILE_SIZE",
channels: channels,
}, cb);
};
Env.getHashOffset = function (channel, hash, cb) {
Env.store.getWeakLock(channel, function (next) {
sendCommand({
command: 'GET_HASH_OFFSET',
channel: channel,
hash: hash,
}, Util.both(next, cb));
});
};
Env.removeOwnedBlob = function (blobId, safeKey, cb) {
sendCommand({
command: 'REMOVE_OWNED_BLOB',
blobId: blobId,
safeKey: safeKey,
}, cb);
};
Env.runTasks = function (cb) {
sendCommand({
command: 'RUN_TASKS',
}, cb);
};
Env.writeTask = function (time, command, args, cb) {
sendCommand({
command: 'WRITE_TASK',
time: time,
task_command: command,
args: args,
}, cb);
};
// Synchronous crypto functions
Env.validateMessage = function (signedMsg, key, cb) {
sendCommand({
msg: signedMsg,
key: key,
command: 'INLINE',
}, cb);
};
Env.checkSignature = function (signedMsg, signature, publicKey, cb) {
sendCommand({
command: 'DETACHED',
sig: signature,
msg: signedMsg,
key: publicKey,
}, cb);
};
Env.hashChannelList = function (channels, cb) {
sendCommand({
command: 'HASH_CHANNEL_LIST',
channels: channels,
}, cb);
};
cb(void 0);
});
};
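// A minimal usage sketch, assuming this module is loaded from lib/workers/index.js
// and that the real Env also carries the stores referenced above (Env.store,
// Env.pinStore) for the methods that take weak locks. The config is forwarded to
// each db-worker's init(), so it needs the same storage paths; values here are
// illustrative, and the main datastore key expected by Store.create() is not
// shown in this diff.
const ExampleWorkers = require('./lib/workers');
const ExampleEnv = {
    Log: { info: console.log, error: console.error, warn: console.warn },
};
ExampleWorkers.initialize(ExampleEnv, {
    taskPath: './tasks',
    blobPath: './blob',
    blobStagingPath: './blobstage',
    archivePath: './data/archive',
    pinPath: './data/pins',
    maxWorkers: 4,
}, function (err) {
    if (err) { return void console.error(err); }
    // methods attached to Env proxy commands to the worker pool, e.g.:
    ExampleEnv.getFileSize('<32 or 48 character channel id>', function (err, bytes) {
        console.log(err || bytes);
    });
});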

527
package-lock.json

@ -1,6 +1,6 @@
{
"name": "cryptpad",
"version": "3.9.0",
"version": "3.16.0",
"lockfileVersion": 1,
"requires": true,
"dependencies": {
@ -13,6 +13,19 @@
"negotiator": "0.6.2"
}
},
"ajv": {
"version": "6.12.0",
"resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.0.tgz",
"integrity": "sha512-D6gFiFA0RRLyUbvijN74DWAjXSFxWKaWP7mldxkVhyhAV3+SWA9HEJPHQ2c9soIeTFJqcSdFDGFgdqs1iUU2Hw==",
"dev": true,
"optional": true,
"requires": {
"fast-deep-equal": "^3.1.1",
"fast-json-stable-stringify": "^2.0.0",
"json-schema-traverse": "^0.4.1",
"uri-js": "^4.2.2"
}
},
"ansi-regex": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz",
@ -55,17 +68,74 @@
"dev": true,
"optional": true
},
"asn1": {
"version": "0.2.4",
"resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.4.tgz",
"integrity": "sha512-jxwzQpLQjSmWXgwaCZE9Nz+glAG01yF1QnWgbhGwHI5A6FRIEY6IVqtHhIepHqI7/kyEyQEagBC5mBEFlIYvdg==",
"dev": true,
"optional": true,
"requires": {
"safer-buffer": "~2.1.0"
}
},
"assert-plus": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz",
"integrity": "sha1-8S4PPF13sLHN2RRpQuTpbB5N1SU=",
"dev": true,
"optional": true
},
"async-limiter": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/async-limiter/-/async-limiter-1.0.1.tgz",
"integrity": "sha512-csOlWGAcRFJaI6m+F2WKdnMKr4HhdhFVBk0H/QbJFMCr+uO2kwohwXQPxw/9OCxp05r5ghVBFSyioixx3gfkNQ=="
},
"asynckit": {
"version": "0.4.0",
"resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz",
"integrity": "sha1-x57Zf380y48robyXkLzDZkdLS3k=",
"dev": true,
"optional": true
},
"aws-sign2": {
"version": "0.7.0",
"resolved": "https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.7.0.tgz",
"integrity": "sha1-tG6JCTSpWR8tL2+G1+ap8bP+dqg=",
"dev": true,
"optional": true
},
"aws4": {
"version": "1.9.1",
"resolved": "https://registry.npmjs.org/aws4/-/aws4-1.9.1.tgz",
"integrity": "sha512-wMHVg2EOHaMRxbzgFJ9gtjOOCrI80OHLG14rxi28XwOW8ux6IiEbRCGGGqCtdAIg4FQCbW20k9RsT4y3gJlFug==",
"dev": true,
"optional": true
},
"balanced-match": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.0.tgz",
"integrity": "sha1-ibTRmasr7kneFk6gK4nORi1xt2c=",
"dev": true
},
"bcrypt-pbkdf": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz",
"integrity": "sha1-pDAdOJtqQ/m2f/PKEaP2Y342Dp4=",
"dev": true,
"optional": true,
"requires": {
"tweetnacl": "^0.14.3"
},
"dependencies": {
"tweetnacl": {
"version": "0.14.5",
"resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz",
"integrity": "sha1-WuaBd/GS1EViadEIr6k/+HQ/T2Q=",
"dev": true,
"optional": true
}
}
},
"body-parser": {
"version": "1.18.3",
"resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.18.3.tgz",
@ -98,10 +168,17 @@
"resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz",
"integrity": "sha1-0ygVQE1olpn4Wk6k+odV3ROpYEg="
},
"caseless": {
"version": "0.12.0",
"resolved": "https://registry.npmjs.org/caseless/-/caseless-0.12.0.tgz",
"integrity": "sha1-G2gcIf+EAzyCZUMJBolCDRhxUdw=",
"dev": true,
"optional": true
},
"chainpad-crypto": {
"version": "0.2.2",
"resolved": "https://registry.npmjs.org/chainpad-crypto/-/chainpad-crypto-0.2.2.tgz",
"integrity": "sha512-7MJ7qPz/C4sJPsDhPMjdSRmliOCPoRO0XM1vUomcgXA6HINlW+if9AAt/H4q154nYhZ/b57njgC6cWgd/RDidg==",
"version": "0.2.4",
"resolved": "https://registry.npmjs.org/chainpad-crypto/-/chainpad-crypto-0.2.4.tgz",
"integrity": "sha512-fWbVyeAv35vf/dkkQaefASlJcEfpEvfRI23Mtn+/TBBry7+LYNuJMXJiovVY35pfyw2+trKh1Py5Asg9vrmaVg==",
"requires": {
"tweetnacl": "git://github.com/dchest/tweetnacl-js.git#v0.12.2"
},
@ -113,14 +190,13 @@
}
},
"chainpad-server": {
"version": "3.0.5",
"resolved": "https://registry.npmjs.org/chainpad-server/-/chainpad-server-3.0.5.tgz",
"integrity": "sha512-USKOMSHsNjnme81Qy3nQ+ji9eCkBPokYH4T82LVHAI0aayTSCXcTPUDLVGDBCRqe8NsXU4io1WPXn1KiZwB8fA==",
"version": "4.0.9",
"resolved": "https://registry.npmjs.org/chainpad-server/-/chainpad-server-4.0.9.tgz",
"integrity": "sha512-8h1W41ktE05TM6LuXrklpW2TUxWeNyIDiRaQygKsXaA/7pyJxF7+AmPVS+xW0c31VkHjQDPiaMzPoxhcxXnIyA==",
"requires": {
"nthen": "^0.1.8",
"nthen": "0.1.8",
"pull-stream": "^3.6.9",
"stream-to-pull-stream": "^1.7.3",
"tweetnacl": "~0.12.2",
"ws": "^3.3.1"
}
},
@ -160,10 +236,20 @@
"integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=",
"dev": true
},
"combined-stream": {
"version": "1.0.8",
"resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz",
"integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==",
"dev": true,
"optional": true,
"requires": {
"delayed-stream": "~1.0.0"
}
},
"commander": {
"version": "2.20.0",
"resolved": "https://registry.npmjs.org/commander/-/commander-2.20.0.tgz",
"integrity": "sha512-7j2y+40w61zy6YC2iRNpUe/NwhNyoXrYpHMrSunaMG64nRnaf96zO/KMQR4OyN/UnE5KLyEBnKHd4aG3rskjpQ==",
"version": "2.20.3",
"resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz",
"integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==",
"dev": true
},
"concat-map": {
@ -207,6 +293,16 @@
"integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=",
"dev": true
},
"dashdash": {
"version": "1.14.1",
"resolved": "https://registry.npmjs.org/dashdash/-/dashdash-1.14.1.tgz",
"integrity": "sha1-hTz6D3y+L+1d4gMmuN1YEDX24vA=",
"dev": true,
"optional": true,
"requires": {
"assert-plus": "^1.0.0"
}
},
"date-now": {
"version": "0.1.4",
"resolved": "https://registry.npmjs.org/date-now/-/date-now-0.1.4.tgz",
@ -221,6 +317,13 @@
"ms": "2.0.0"
}
},
"delayed-stream": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz",
"integrity": "sha1-3zrhmayt+31ECqrgsp4icrJOxhk=",
"dev": true,
"optional": true
},
"depd": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz",
@ -241,9 +344,9 @@
}
},
"dom-serializer": {
"version": "0.2.1",
"resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-0.2.1.tgz",
"integrity": "sha512-sK3ujri04WyjwQXVoK4PU3y8ula1stq10GJZpqHIUgoGZdsGzAGu65BnU3d08aTVSvO7mGPZUc0wTEDL+qGE0Q==",
"version": "0.2.2",
"resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-0.2.2.tgz",
"integrity": "sha512-2/xPb3ORsQ42nHYiSunXkDjPLBaEj/xTwUO4B7XCZQTRk7EBtTOPaygh10YAAh2OI1Qrp6NWfpAhzswj0ydt9g==",
"dev": true,
"requires": {
"domelementtype": "^2.0.1",
@ -298,6 +401,17 @@
"is-obj": "^1.0.0"
}
},
"ecc-jsbn": {
"version": "0.1.2",
"resolved": "https://registry.npmjs.org/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz",
"integrity": "sha1-OoOpBOVDUyh4dMVkt1SThoSamMk=",
"dev": true,
"optional": true,
"requires": {
"jsbn": "~0.1.0",
"safer-buffer": "^2.1.0"
}
},
"ee-first": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz",
@ -383,6 +497,34 @@
"vary": "~1.1.2"
}
},
"extend": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz",
"integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==",
"dev": true,
"optional": true
},
"extsprintf": {
"version": "1.3.0",
"resolved": "https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz",
"integrity": "sha1-lpGEQOMEGnpBT4xS48V06zw+HgU=",
"dev": true,
"optional": true
},
"fast-deep-equal": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.1.tgz",
"integrity": "sha512-8UEa58QDLauDNfpbrX55Q9jrGHThw2ZMdOky5Gl1CDtVeJDPVrG4Jxx1N8jw2gkWaff5UUuX1KJd+9zGe2B+ZA==",
"dev": true,
"optional": true
},
"fast-json-stable-stringify": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz",
"integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==",
"dev": true,
"optional": true
},
"finalhandler": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.1.1.tgz",
@ -398,16 +540,29 @@
}
},
"flatten": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/flatten/-/flatten-1.0.2.tgz",
"integrity": "sha1-2uRqnXj74lKSJYzB54CkHZXAN4I=",
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/flatten/-/flatten-1.0.3.tgz",
"integrity": "sha512-dVsPA/UwQ8+2uoFe5GHtiBMu48dWLTdsuEd7CKGlZlD78r1TTWBvDuFaFGKCo/ZfEr95Uk56vZoX86OsHkUeIg==",
"dev": true
},
"flow-bin": {
"version": "0.59.0",
"resolved": "https://registry.npmjs.org/flow-bin/-/flow-bin-0.59.0.tgz",
"integrity": "sha512-yJDRffvby5mCTkbwOdXwiGDjeea8Z+BPVuP53/tHqHIZC+KtQD790zopVf7mHk65v+wRn+TZ7tkRSNA9oDmyLg==",
"dev": true
"forever-agent": {
"version": "0.6.1",
"resolved": "https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz",
"integrity": "sha1-+8cfDEGt6zf5bFd60e1C2P2sypE=",
"dev": true,
"optional": true
},
"form-data": {
"version": "2.3.3",
"resolved": "https://registry.npmjs.org/form-data/-/form-data-2.3.3.tgz",
"integrity": "sha512-1lLKB2Mu3aGP1Q/2eCOx0fNbRMe7XdwktwOruhfqqd0rIJWwN4Dh+E3hrPSlDCXnSR7UtZ1N38rVXm+6+MEhJQ==",
"dev": true,
"optional": true,
"requires": {
"asynckit": "^0.4.0",
"combined-stream": "^1.0.6",
"mime-types": "^2.1.12"
}
},
"forwarded": {
"version": "0.1.2",
@ -449,10 +604,20 @@
"tiny-each-async": "2.0.3"
}
},
"getpass": {
"version": "0.1.7",
"resolved": "https://registry.npmjs.org/getpass/-/getpass-0.1.7.tgz",
"integrity": "sha1-Xv+OPmhNVprkyysSgmBOi6YhSfo=",
"dev": true,
"optional": true,
"requires": {
"assert-plus": "^1.0.0"
}
},
"glob": {
"version": "7.1.4",
"resolved": "https://registry.npmjs.org/glob/-/glob-7.1.4.tgz",
"integrity": "sha512-hkLPepehmnKk41pUGm3sYxoFs/umurYfYJCerbXEyFIWcAzvpipAgVkBqqT9RBKMGjnq6kMuyYwha6csxbiM1A==",
"version": "7.1.6",
"resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz",
"integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==",
"dev": true,
"requires": {
"fs.realpath": "^1.0.0",
@ -478,9 +643,27 @@
}
},
"graceful-fs": {
"version": "4.2.2",
"resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.2.tgz",
"integrity": "sha512-IItsdsea19BoLC7ELy13q1iJFNmd7ofZH5+X/pJr90/nRoPEX0DJo1dHDbgtYWOhJhcCgMDTOw84RZ72q6lB+Q=="
"version": "4.2.3",
"resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.3.tgz",
"integrity": "sha512-a30VEBm4PEdx1dRB7MFK7BejejvCvBronbLjht+sHuGYj8PHs7M/5Z+rt5lw551vZ7yfTCj4Vuyy3mSJytDWRQ=="
},
"har-schema": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/har-schema/-/har-schema-2.0.0.tgz",
"integrity": "sha1-qUwiJOvKwEeCoNkDVSHyRzW37JI=",
"dev": true,
"optional": true
},
"har-validator": {
"version": "5.1.3",
"resolved": "https://registry.npmjs.org/har-validator/-/har-validator-5.1.3.tgz",
"integrity": "sha512-sNvOCzEQNr/qrvJgc3UG/kD4QtlHycrzwS+6mfTrrSq97BvaYcPZZI1ZSqGSPR73Cxn4LKTD4PttRwfU7jWq5g==",
"dev": true,
"optional": true,
"requires": {
"ajv": "^6.5.5",
"har-schema": "^2.0.0"
}
},
"has-ansi": {
"version": "2.0.0",
@ -521,6 +704,18 @@
"statuses": ">= 1.4.0 < 2"
}
},
"http-signature": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/http-signature/-/http-signature-1.2.0.tgz",
"integrity": "sha1-muzZJRFHcvPZW2WmCruPfBj7rOE=",
"dev": true,
"optional": true,
"requires": {
"assert-plus": "^1.0.0",
"jsprim": "^1.2.2",
"sshpk": "^1.7.0"
}
},
"iconv-lite": {
"version": "0.4.23",
"resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.23.tgz",
@ -580,22 +775,43 @@
"integrity": "sha1-PkcprB9f3gJc19g6iW2rn09n2w8=",
"dev": true
},
"is-typedarray": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz",
"integrity": "sha1-5HnICFjfDBsR3dppQPlgEfzaSpo=",
"dev": true,
"optional": true
},
"isarray": {
"version": "0.0.1",
"resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz",
"integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=",
"dev": true
},
"isstream": {
"version": "0.1.2",
"resolved": "https://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz",
"integrity": "sha1-R+Y/evVa+m+S4VAOaQ64uFKcCZo=",
"dev": true,
"optional": true
},
"js-base64": {
"version": "2.5.1",
"resolved": "https://registry.npmjs.org/js-base64/-/js-base64-2.5.1.tgz",
"integrity": "sha512-M7kLczedRMYX4L8Mdh4MzyAMM9O5osx+4FcOQuTvr3A9F2D9S5JXheN0ewNbrvK2UatkTRhL5ejGmGSjNMiZuw==",
"dev": true
},
"jsbn": {
"version": "0.1.1",
"resolved": "https://registry.npmjs.org/jsbn/-/jsbn-0.1.1.tgz",
"integrity": "sha1-peZUwuWi3rXyAdls77yoDA7y9RM=",
"dev": true,
"optional": true
},
"jshint": {
"version": "2.10.2",
"resolved": "https://registry.npmjs.org/jshint/-/jshint-2.10.2.tgz",
"integrity": "sha512-e7KZgCSXMJxznE/4WULzybCMNXNAd/bf5TSrvVEq78Q/K8ZwFpmBqQeDtNiHc3l49nV4E/+YeHU/JZjSUIrLAA==",
"version": "2.11.0",
"resolved": "https://registry.npmjs.org/jshint/-/jshint-2.11.0.tgz",
"integrity": "sha512-ooaD/hrBPhu35xXW4gn+o3SOuzht73gdBuffgJzrZBJZPGgGiiTvJEgTyxFvBO2nz0+X1G6etF8SzUODTlLY6Q==",
"dev": true,
"requires": {
"cli": "~1.0.0",
@ -608,6 +824,27 @@
"strip-json-comments": "1.0.x"
}
},
"json-schema": {
"version": "0.2.3",
"resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.2.3.tgz",
"integrity": "sha1-tIDIkuWaLwWVTOcnvT8qTogvnhM=",
"dev": true,
"optional": true
},
"json-schema-traverse": {
"version": "0.4.1",
"resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz",
"integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==",
"dev": true,
"optional": true
},
"json-stringify-safe": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz",
"integrity": "sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus=",
"dev": true,
"optional": true
},
"jsonfile": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz",
@ -616,6 +853,19 @@
"graceful-fs": "^4.1.6"
}
},
"jsprim": {
"version": "1.4.1",
"resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.1.tgz",
"integrity": "sha1-MT5mvB5cwG5Di8G3SZwuXFastqI=",
"dev": true,
"optional": true,
"requires": {
"assert-plus": "1.0.0",
"extsprintf": "1.3.0",
"json-schema": "0.2.3",
"verror": "1.10.0"
}
},
"jszip": {
"version": "3.2.2",
"resolved": "https://registry.npmjs.org/jszip/-/jszip-3.2.2.tgz",
@ -635,9 +885,9 @@
"dev": true
},
"readable-stream": {
"version": "2.3.6",
"resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.6.tgz",
"integrity": "sha512-tQtKA9WIAhBF3+VLAseyMqZeBjW0AHJoxOtYqSUZNJxauErmLbVm2FW1y+J/YA9dUrAC39ITejlZWhVIwawkKw==",
"version": "2.3.7",
"resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz",
"integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==",
"dev": true,
"requires": {
"core-util-is": "~1.0.0",
@ -661,18 +911,28 @@
}
},
"less": {
"version": "2.7.1",
"resolved": "https://registry.npmjs.org/less/-/less-2.7.1.tgz",
"integrity": "sha1-bL/qIrO4MDBOml+zcdVPpIDJ188=",
"version": "3.7.1",
"resolved": "https://registry.npmjs.org/less/-/less-3.7.1.tgz",
"integrity": "sha512-Cmf5XJlzNklkBC8eAa+Ef16AHUBAkApHNAw3x9Vmn84h2BvGrri5Id7kf6H1n6SN74Fc0WdHIRPlFMxsl0eJkA==",
"dev": true,
"requires": {
"errno": "^0.1.1",
"graceful-fs": "^4.1.2",
"image-size": "~0.5.0",
"mime": "^1.2.11",
"mime": "^1.4.1",
"mkdirp": "^0.5.0",
"promise": "^7.1.1",
"source-map": "^0.5.3"
"request": "^2.83.0",
"source-map": "~0.6.0"
},
"dependencies": {
"source-map": {
"version": "0.6.1",
"resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
"integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==",
"dev": true,
"optional": true
}
}
},
"lesshint": {
@ -766,16 +1026,16 @@
"integrity": "sha512-KI1+qOZu5DcW6wayYHSzR/tXKCDC5Om4s1z2QJjDULzLcmf3DvzS7oluY4HCTrc+9FiKmWUgeNLg7W3uIQvxtQ=="
},
"mime-db": {
"version": "1.40.0",
"resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.40.0.tgz",
"integrity": "sha512-jYdeOMPy9vnxEqFRRo6ZvTZ8d9oPb+k18PKoYNYUe2stVEBPPwsln/qWzdbmaIvnhZ9v2P+CuecK+fpUfsV2mA=="
"version": "1.43.0",
"resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.43.0.tgz",
"integrity": "sha512-+5dsGEEovYbT8UY9yD7eE4XTc4UwJ1jBYlgaQQF38ENsKR3wj/8q8RFZrF9WIZpB2V1ArTVFUva8sAul1NzRzQ=="
},
"mime-types": {
"version": "2.1.24",
"resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.24.tgz",
"integrity": "sha512-WaFHS3MCl5fapm3oLxU4eYDw77IQM2ACcxQ9RIxfaC3ooc6PFuBMGZZsYpvoXS5D5QTWPieo1jjLdAm3TBP3cQ==",
"version": "2.1.26",
"resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.26.tgz",
"integrity": "sha512-01paPWYgLrkqAyrlDorC1uDwl2p3qZT7yl806vW7DvDoxwXi46jsjFbg+WdwotBIk6/MbEhO/dh5aZ5sNj/dWQ==",
"requires": {
"mime-db": "1.40.0"
"mime-db": "1.43.0"
}
},
"minimatch": {
@ -788,20 +1048,20 @@
}
},
"minimist": {
"version": "0.0.8",
"resolved": "https://registry.npmjs.org/minimist/-/minimist-0.0.8.tgz",
"integrity": "sha1-hX/Kv8M5fSYluCKCYuhqp6ARsF0=",
"version": "1.2.5",
"resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz",
"integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==",
"dev": true,
"optional": true
},
"mkdirp": {
"version": "0.5.1",
"resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.1.tgz",
"integrity": "sha1-MAV0OOrGz3+MR2fzhkjWaX11yQM=",
"version": "0.5.3",
"resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.3.tgz",
"integrity": "sha512-P+2gwrFqx8lhew375MQHHeTlY8AuOJSrGf0R5ddkEndUkmwpgUob/vQuBD1V22/Cw1/lJr4x+EjllSezBThzBg==",
"dev": true,
"optional": true,
"requires": {
"minimist": "0.0.8"
"minimist": "^1.2.5"
}
},
"ms": {
@ -824,6 +1084,13 @@
"resolved": "https://registry.npmjs.org/nthen/-/nthen-0.1.8.tgz",
"integrity": "sha512-Oh2CwIbhj+wUT94lQV7LKmmgw3UYAGGd8oLIqp6btQN3Bz3PuWp4BuvtUo35H3rqDknjPfKx5P6mt7v+aJNjcw=="
},
"oauth-sign": {
"version": "0.9.0",
"resolved": "https://registry.npmjs.org/oauth-sign/-/oauth-sign-0.9.0.tgz",
"integrity": "sha512-fexhUFFPTGV8ybAtSIGbV6gOkSv8UtRbDBnAyLQw4QPKkgNlsH2ByPGtMUqdWkos6YCRmAqViwgZrJc/mRDzZQ==",
"dev": true,
"optional": true
},
"on-finished": {
"version": "2.3.0",
"resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz",
@ -848,9 +1115,9 @@
"dev": true
},
"pako": {
"version": "1.0.10",
"resolved": "https://registry.npmjs.org/pako/-/pako-1.0.10.tgz",
"integrity": "sha512-0DTvPVU3ed8+HNXOu5Bs+o//Mbdj9VNQMUOe9oKCwh8l0GNwpTDMKCWbRjgtD291AWnkAgkqA/LOnQS8AmS1tw==",
"version": "1.0.11",
"resolved": "https://registry.npmjs.org/pako/-/pako-1.0.11.tgz",
"integrity": "sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw==",
"dev": true
},
"parseurl": {
@ -878,6 +1145,13 @@
"pify": "^3.0.0"
}
},
"performance-now": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz",
"integrity": "sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns=",
"dev": true,
"optional": true
},
"pify": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz",
@ -1022,11 +1296,25 @@
"dev": true,
"optional": true
},
"psl": {
"version": "1.7.0",
"resolved": "https://registry.npmjs.org/psl/-/psl-1.7.0.tgz",
"integrity": "sha512-5NsSEDv8zY70ScRnOTn7bK7eanl2MvFrOrS/R6x+dBt5g1ghnj9Zv90kO8GwT8gxcu2ANyFprnFYB85IogIJOQ==",
"dev": true,
"optional": true
},
"pull-stream": {
"version": "3.6.14",
"resolved": "https://registry.npmjs.org/pull-stream/-/pull-stream-3.6.14.tgz",
"integrity": "sha512-KIqdvpqHHaTUA2mCYcLG1ibEbu/LCKoJZsBWyv9lSYtPkJPBq8m3Hxa103xHi6D2thj5YXa0TqK3L3GUkwgnew=="
},
"punycode": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz",
"integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==",
"dev": true,
"optional": true
},
"qs": {
"version": "6.5.2",
"resolved": "https://registry.npmjs.org/qs/-/qs-6.5.2.tgz",
@ -1069,6 +1357,35 @@
"string_decoder": "~0.10.x"
}
},
"request": {
"version": "2.88.2",
"resolved": "https://registry.npmjs.org/request/-/request-2.88.2.tgz",
"integrity": "sha512-MsvtOrfG9ZcrOwAW+Qi+F6HbD0CWXEh9ou77uOb7FM2WPhwT7smM833PzanhJLsgXjN89Ir6V2PczXNnMpwKhw==",
"dev": true,
"optional": true,
"requires": {
"aws-sign2": "~0.7.0",
"aws4": "^1.8.0",
"caseless": "~0.12.0",
"combined-stream": "~1.0.6",
"extend": "~3.0.2",
"forever-agent": "~0.6.1",
"form-data": "~2.3.2",
"har-validator": "~5.1.3",
"http-signature": "~1.2.0",
"is-typedarray": "~1.0.0",
"isstream": "~0.1.2",
"json-stringify-safe": "~5.0.1",
"mime-types": "~2.1.19",
"oauth-sign": "~0.9.0",
"performance-now": "^2.1.0",
"qs": "~6.5.2",
"safe-buffer": "^5.1.2",
"tough-cookie": "~2.5.0",
"tunnel-agent": "^0.6.0",
"uuid": "^3.3.2"
}
},
"rimraf": {
"version": "2.7.1",
"resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.7.1.tgz",
@ -1179,6 +1496,33 @@
"integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=",
"dev": true
},
"sshpk": {
"version": "1.16.1",
"resolved": "https://registry.npmjs.org/sshpk/-/sshpk-1.16.1.tgz",
"integrity": "sha512-HXXqVUq7+pcKeLqqZj6mHFUMvXtOJt1uoUx09pFW6011inTMxqI8BA8PM95myrIyyKwdnzjdFjLiE6KBPVtJIg==",
"dev": true,
"optional": true,
"requires": {
"asn1": "~0.2.3",
"assert-plus": "^1.0.0",
"bcrypt-pbkdf": "^1.0.0",
"dashdash": "^1.12.0",
"ecc-jsbn": "~0.1.1",
"getpass": "^0.1.1",
"jsbn": "~0.1.0",
"safer-buffer": "^2.0.2",
"tweetnacl": "~0.14.0"
},
"dependencies": {
"tweetnacl": {
"version": "0.14.5",
"resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz",
"integrity": "sha1-WuaBd/GS1EViadEIr6k/+HQ/T2Q=",
"dev": true,
"optional": true
}
}
},
"statuses": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/statuses/-/statuses-1.4.0.tgz",
@ -1237,6 +1581,27 @@
"os-tmpdir": "~1.0.1"
}
},
"tough-cookie": {
"version": "2.5.0",
"resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.5.0.tgz",
"integrity": "sha512-nlLsUzgm1kfLXSXfRZMc1KLAugd4hqJHDTvc2hDIwS3mZAfMEuMbc03SujMF+GEcpaX/qboeycw6iO8JwVv2+g==",
"dev": true,
"optional": true,
"requires": {
"psl": "^1.1.28",
"punycode": "^2.1.1"
}
},
"tunnel-agent": {
"version": "0.6.0",
"resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz",
"integrity": "sha1-J6XeoGs2sEoKmWZ3SykIaPD8QP0=",
"dev": true,
"optional": true,
"requires": {
"safe-buffer": "^5.0.1"
}
},
"tweetnacl": {
"version": "0.12.2",
"resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.12.2.tgz",
@ -1251,6 +1616,11 @@
"mime-types": "~2.1.24"
}
},
"ulimit": {
"version": "0.0.2",
"resolved": "https://registry.npmjs.org/ulimit/-/ulimit-0.0.2.tgz",
"integrity": "sha1-K1H53IOBrkECY2zsXrM4wmMFiKA="
},
"ultron": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/ultron/-/ultron-1.1.1.tgz",
@ -1272,6 +1642,16 @@
"resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz",
"integrity": "sha1-sr9O6FFKrmFltIF4KdIbLvSZBOw="
},
"uri-js": {
"version": "4.2.2",
"resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.2.2.tgz",
"integrity": "sha512-KY9Frmirql91X2Qgjry0Wd4Y+YTdrdZheS8TFwvkbLWf/G5KNJDCh6pKL5OZctEW4+0Baa5idK2ZQuELRwPznQ==",
"dev": true,
"optional": true,
"requires": {
"punycode": "^2.1.0"
}
},
"util-deprecate": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
@ -1283,11 +1663,30 @@
"resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz",
"integrity": "sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM="
},
"uuid": {
"version": "3.4.0",
"resolved": "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz",
"integrity": "sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==",
"dev": true,
"optional": true
},
"vary": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz",
"integrity": "sha1-IpnwLG3tMNSllhsLn3RSShj2NPw="
},
"verror": {
"version": "1.10.0",
"resolved": "https://registry.npmjs.org/verror/-/verror-1.10.0.tgz",
"integrity": "sha1-OhBcoXBTr1XW4nDB+CiGguGNpAA=",
"dev": true,
"optional": true,
"requires": {
"assert-plus": "^1.0.0",
"core-util-is": "1.0.2",
"extsprintf": "^1.2.0"
}
},
"wrappy": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
@ -1305,19 +1704,19 @@
}
},
"xml2js": {
"version": "0.4.19",
"resolved": "https://registry.npmjs.org/xml2js/-/xml2js-0.4.19.tgz",
"integrity": "sha512-esZnJZJOiJR9wWKMyuvSE1y6Dq5LCuJanqhxslH2bxM6duahNZ+HMpCLhBQGZkbX6xRf8x1Y2eJlgt2q3qo49Q==",
"version": "0.4.23",
"resolved": "https://registry.npmjs.org/xml2js/-/xml2js-0.4.23.tgz",
"integrity": "sha512-ySPiMjM0+pLDftHgXY4By0uswI3SPKLDw/i3UXbnO8M/p28zqexCUoPmQFrYD+/1BzhGJSs2i1ERWKJAtiLrug==",
"dev": true,
"requires": {
"sax": ">=0.6.0",
"xmlbuilder": "~9.0.1"
"xmlbuilder": "~11.0.0"
}
},
"xmlbuilder": {
"version": "9.0.7",
"resolved": "https://registry.npmjs.org/xmlbuilder/-/xmlbuilder-9.0.7.tgz",
"integrity": "sha1-Ey7mPS7FVlxVfiD0wi35rKaGsQ0=",
"version": "11.0.1",
"resolved": "https://registry.npmjs.org/xmlbuilder/-/xmlbuilder-11.0.1.tgz",
"integrity": "sha512-fDlsI/kFEx7gLvbecc0/ohLG50fugQp8ryHzMTuW9vSa1GJ0XYWKnhsUx7oie3G98+r56aTQIUB4kht42R3JvA==",
"dev": true
}
}

10
package.json

@ -1,7 +1,7 @@
{
"name": "cryptpad",
"description": "realtime collaborative visual editor with zero knowlege server",
"version": "3.9.0",
"version": "3.16.0",
"license": "AGPL-3.0+",
"repository": {
"type": "git",
@ -13,7 +13,7 @@
},
"dependencies": {
"chainpad-crypto": "^0.2.2",
"chainpad-server": "^3.0.5",
"chainpad-server": "^4.0.9",
"express": "~4.16.0",
"fs-extra": "^7.0.0",
"get-folder-size": "^2.0.1",
@ -24,12 +24,12 @@
"sortify": "^1.0.4",
"stream-to-pull-stream": "^1.7.2",
"tweetnacl": "~0.12.2",
"ulimit": "0.0.2",
"ws": "^3.3.1"
},
"devDependencies": {
"flow-bin": "^0.59.0",
"jshint": "^2.10.2",
"less": "2.7.1",
"less": "3.7.1",
"lesshint": "^4.5.0",
"selenium-webdriver": "^3.6.0"
},
@ -40,8 +40,8 @@
"package": "PACKAGE=1 node server.js",
"lint": "jshint --config .jshintrc --exclude-path .jshintignore . && ./node_modules/lesshint/bin/lesshint -c ./.lesshintrc ./customize.dist/src/less2/",
"lint:js": "jshint --config .jshintrc --exclude-path .jshintignore .",
"lint:server": "jshint --config .jshintrc lib",
"lint:less": "./node_modules/lesshint/bin/lesshint -c ./.lesshintrc ./customize.dist/src/less2/",
"flow": "./node_modules/.bin/flow",
"test": "node scripts/TestSelenium.js",
"test-rpc": "cd scripts/tests && node test-rpc",
"template": "cd customize.dist/src && for page in ../index.html ../privacy.html ../terms.html ../about.html ../contact.html ../what-is-cryptpad.html ../features.html ../../www/login/index.html ../../www/register/index.html ../../www/user/index.html;do echo $page; cp template.html $page; done;",

1766
rpc.js
File diff suppressed because it is too large

3
scripts/check-account-deletion.js

@ -1,7 +1,6 @@
/* jshint esversion: 6, node: true */
const Fs = require('fs');
const nThen = require('nthen');
const Pinned = require('./pinned');
const Nacl = require('tweetnacl/nacl-fast');
const Path = require('path');
const Pins = require('../lib/pins');
@ -41,7 +40,7 @@ nThen((waitFor) => {
pinned = Pins.calculateFromLog(content.toString('utf8'), f);
}));
}).nThen((waitFor) => {
Pinned.load(waitFor((d) => {
Pins.list(waitFor((err, d) => {
data = Object.keys(d);
}), {
exclude: [edPublic + '.ndjson']

42
scripts/compare-pin-methods.js

@ -0,0 +1,42 @@
/* jshint esversion: 6, node: true */
const nThen = require("nthen");
const Pins = require("../lib/pins");
const Assert = require("assert");
const config = require("../lib/load-config");
var compare = function () {
console.log(config);
var conf = {
pinPath: config.pinPath,
};
var list, load;
nThen(function (w) {
Pins.list(w(function (err, p) {
if (err) { throw err; }
list = p;
console.log(p);
console.log(list);
console.log();
}), conf);
}).nThen(function (w) {
Pins.load(w(function (err, p) {
if (err) { throw err; }
load = p;
console.log(load);
console.log();
}), conf);
}).nThen(function () {
console.log({
listLength: Object.keys(list).length,
loadLength: Object.keys(load).length,
});
Assert.deepEqual(list, load);
console.log("methods are equivalent");
});
};
compare();

8
scripts/diagnose-archive-conflicts.js

@ -1,6 +1,6 @@
var nThen = require("nthen");
var Store = require("../storage/file");
var Store = require("../lib/storage/file");
var config = require("../lib/load-config");
var store;
@ -8,7 +8,11 @@ var Log;
nThen(function (w) {
// load the store which will be used for iterating over channels
// and performing operations like archival and deletion
Store.create(config, w(function (_) {
Store.create(config, w(function (err, _) {
if (err) {
w.abort();
throw err;
}
store = _;
}));

90
scripts/evict-inactive.js

@ -1,8 +1,8 @@
var nThen = require("nthen");
var Store = require("../storage/file");
var BlobStore = require("../storage/blob");
var Pinned = require("./pinned");
var Store = require("../lib/storage/file");
var BlobStore = require("../lib/storage/blob");
var Pins = require("../lib/pins");
var config = require("../lib/load-config");
// the administrator should have set an 'inactiveTime' in their config
@ -34,11 +34,15 @@ var msSinceStart = function () {
nThen(function (w) {
// load the store which will be used for iterating over channels
// and performing operations like archival and deletion
Store.create(config, w(function (_) {
Store.create(config, w(function (err, _) {
if (err) {
w.abort();
throw err;
}
store = _;
}));
// load the list of pinned files so you know which files
// should not be archived or deleted
Pinned.load(w(function (err, _) {
Pins.load(w(function (err, _) {
if (err) {
w.abort();
return void console.error(err);
@ -116,6 +120,8 @@ nThen(function (w) {
store.listArchivedChannels(handler, w(done));
}).nThen(function (w) {
if (typeof(config.archiveRetentionTime) !== "number") { return; }
// Iterate over archive blob ownership proofs and remove them
// if they are older than the specified retention time
var removed = 0;
blobs.list.archived.proofs(function (err, item, next) {
if (err) {
@ -138,6 +144,8 @@ nThen(function (w) {
}));
}).nThen(function (w) {
if (typeof(config.archiveRetentionTime) !== "number") { return; }
// Iterate over archived blobs and remove them
// if they are older than the specified retention time
var removed = 0;
blobs.list.archived.blobs(function (err, item, next) {
if (err) {
@ -158,29 +166,40 @@ nThen(function (w) {
}, w(function () {
Log.info('EVICT_ARCHIVED_BLOBS_REMOVED', removed);
}));
/* TODO find a reliable metric for determining the activity of blobs...
}).nThen(function (w) {
var blobCount = 0;
var lastHour = 0;
// iterate over blobs and remove them
// if they have not been accessed within the specified retention time
var removed = 0;
blobs.list.blobs(function (err, item, next) {
blobCount++;
if (err) {
Log.error("EVICT_BLOB_LIST_BLOBS_ERROR", err);
return void next();
}
if (pins[item.blobId]) { return void next(); }
if (item && getNewestTime(item) > retentionTime) { return void next(); }
// TODO determine when to retire blobs
console.log(item);
next();
blobs.archive.blob(item.blobId, function (err) {
if (err) {
Log.error("EVICT_ARCHIVE_BLOB_ERROR", {
error: err,
item: item,
});
return void next();
}
Log.info("EVICT_ARCHIVE_BLOB", {
item: item,
});
removed++;
next();
});
}, w(function () {
console.log("Listed %s blobs", blobCount);
console.log("Listed %s blobs accessed in the last hour", lastHour);
Log.info('EVICT_BLOBS_REMOVED', removed);
}));
}).nThen(function (w) {
var proofCount = 0;
// iterate over blob proofs and remove them
// if they don't correspond to a pinned or active file
var removed = 0;
blobs.list.proofs(function (err, item, next) {
proofCount++;
if (err) {
next();
return void Log.error("EVICT_BLOB_LIST_PROOFS_ERROR", err);
@ -205,15 +224,14 @@ nThen(function (w) {
if (err) {
return Log.error("EVICT_BLOB_PROOF_LONELY_ERROR", item);
}
removed++;
return Log.info("EVICT_BLOB_PROOF_LONELY", item);
});
});
}, function () {
console.log("Listed %s blob proofs", proofCount);
});
*/
}, w(function () {
Log.info("EVICT_BLOB_PROOFS_REMOVED", removed);
}));
}).nThen(function (w) {
var removed = 0;
var channels = 0;
var archived = 0;
@ -245,42 +263,22 @@ nThen(function (w) {
// ignore the channel if it's pinned
if (pins[item.channel]) { return void cb(); }
// if the server is configured to retain data, archive the channel
if (config.retainData) {
return void store.archiveChannel(item.channel, w(function (err) {
if (err) {
Log.error('EVICT_CHANNEL_ARCHIVAL_ERROR', {
error: err,
channel: item.channel,
});
return void cb();
}
Log.info('EVICT_CHANNEL_ARCHIVAL', item.channel);
archived++;
cb();
}));
}
// otherwise remove it
store.removeChannel(item.channel, w(function (err) {
return void store.archiveChannel(item.channel, w(function (err) {
if (err) {
Log.error('EVICT_CHANNEL_REMOVAL_ERROR', {
Log.error('EVICT_CHANNEL_ARCHIVAL_ERROR', {
error: err,
channel: item.channel,
});
return void cb();
}
Log.info('EVICT_CHANNEL_REMOVAL', item.channel);
removed++;
Log.info('EVICT_CHANNEL_ARCHIVAL', item.channel);
archived++;
cb();
}));
};
var done = function () {
if (config.retainData) {
return void Log.info('EVICT_CHANNELS_ARCHIVED', archived);
}
return void Log.info('EVICT_CHANNELS_REMOVED', removed);
return void Log.info('EVICT_CHANNELS_ARCHIVED', archived);
};
store.listChannels(handler, w(done));

4
scripts/expire-channels.js

@ -1,9 +1,9 @@
var nThen = require("nthen");
var Tasks = require("../storage/tasks");
var Tasks = require("../lib/storage/tasks");
var Logger = require("../lib/log");
var config = require("../lib/load-config");
var FileStorage = require('../' + config.storage || './storage/file');
var FileStorage = require('../lib/storage/file');
nThen(function (w) {
Logger.create(config, w(function (_log) {

4
scripts/migrations/migrate-tasks-v1.js

@ -1,5 +1,5 @@
var nThen = require("nthen");
var Tasks = require("../../storage/tasks");
var Tasks = require("../../lib/storage/tasks");
var Logger = require("../../lib/log");
var config = require("../../lib/load-config");
@ -7,7 +7,7 @@ var config = require("../../lib/load-config");
// this isn't strictly necessary for what we want to do
// but the API requires it, and I don't feel like changing that
// --ansuz
var FileStorage = require("../../" + (config.storage || "./storage/file"));
var FileStorage = require("../../lib/storage/file");
var tasks;
nThen(function (w) {

5
scripts/restore-archived.js

@ -1,6 +1,6 @@
var nThen = require("nthen");
var Store = require("../storage/file");
var Store = require("../lib/storage/file");
var config = require("../lib/load-config");
var store;
@ -8,7 +8,8 @@ var Log;
nThen(function (w) {
// load the store which will be used for iterating over channels
// and performing operations like archival and deletion
Store.create(config, w(function (_) {
Store.create(config, w(function (err, _) {
if (err) { throw err; }
store = _;
}));

235
scripts/tests/test-mailbox.js

@ -0,0 +1,235 @@
/* globals process */
var Client = require("../../lib/client/");
var Crypto = require("../../www/bower_components/chainpad-crypto");
var Mailbox = Crypto.Mailbox;
var Nacl = require("tweetnacl/nacl-fast");
var nThen = require("nthen");
var Pinpad = require("../../www/common/pinpad");
var Rpc = require("../../www/common/rpc");
var Hash = require("../../www/common/common-hash");
var CpNetflux = require("../../www/bower_components/chainpad-netflux");
var Util = require("../../lib/common-util");
// you need more than 100 messages in the history, and you need a lastKnownHash between "50" and "length - 50"
var createMailbox = function (config, _cb) {
var cb = Util.once(Util.mkAsync(_cb));
var webchannel;
var user = config.user;
user.messages = [];
CpNetflux.start({
network: config.network,
channel: config.channel,
crypto: config.crypto,
owners: [ config.edPublic ],
noChainPad: true,
lastKnownHash: config.lastKnownHash,
onChannelError: function (err) {
cb(err);
},
onConnect: function (wc /*, sendMessage */) {
webchannel = wc;
},
onMessage: function (msg /*, user, vKey, isCp, hash, author */) {
user.messages.push(msg);
},
onReady: function () {
cb(void 0, webchannel);
},
});
};
process.on('unhandledRejection', function (err) {
console.error(err);
});
var state = {};
var makeCurveKeys = function () {
var pair = Nacl.box.keyPair();
return {
curvePrivate: Nacl.util.encodeBase64(pair.secretKey),
curvePublic: Nacl.util.encodeBase64(pair.publicKey),
};
};
var makeEdKeys = function () {
var keys = Nacl.sign.keyPair.fromSeed(Nacl.randomBytes(Nacl.sign.seedLength));
return {
edPrivate: Nacl.util.encodeBase64(keys.secretKey),
edPublic: Nacl.util.encodeBase64(keys.publicKey),
};
};
var edKeys = makeEdKeys();
var curveKeys = makeCurveKeys();
var mailboxChannel = Hash.createChannelId();
var createUser = function (config, cb) {
// config should contain keys for a team rpc (ed)
// teamEdKeys
// rosterHash
var user;
nThen(function (w) {
Client.create(w(function (err, client) {
if (err) {
w.abort();
return void cb(err);
}
user = client;
user.destroy = Util.mkEvent(true);
user.destroy.reg(function () {
user.network.disconnect();
});
}));
}).nThen(function (w) {
// make all the parameters you'll need
var network = user.network = user.config.network;
user.edKeys = edKeys;
user.curveKeys = curveKeys;
user.mailbox = Mailbox.createEncryptor(user.curveKeys);
user.mailboxChannel = mailboxChannel;
// create an anon rpc for alice
Rpc.createAnonymous(network, w(function (err, rpc) {
if (err) {
w.abort();
user.shutdown();
return void console.error('ANON_RPC_CONNECT_ERR');
}
user.anonRpc = rpc;
user.destroy.reg(function () {
user.anonRpc.destroy();
});
}));
Pinpad.create(network, user.edKeys, w(function (err, rpc) {
if (err) {
w.abort();
user.shutdown();
console.error(err);
return console.log('RPC_CONNECT_ERR');
}
user.rpc = rpc;
user.destroy.reg(function () {
user.rpc.destroy();
});
}));
}).nThen(function (w) {
// create and subscribe to your mailbox
createMailbox({
user: user,
lastKnownHash: config.lastKnownHash,
network: user.network,
channel: user.mailboxChannel,
crypto: user.mailbox,
edPublic: user.edKeys.edPublic,
}, w(function (err /*, wc*/) {
if (err) {
w.abort();
//console.error("Mailbox creation error");
cb(err);
//process.exit(1);
}
//wc.leave();
}));
}).nThen(function () {
user.cleanup = function (cb) {
//console.log("Destroying user");
// TODO remove your mailbox
user.destroy.fire();
cb = cb;
};
cb(void 0, user);
});
};
var alice;
nThen(function (w) {
createUser({
//sharedConfig
}, w(function (err, _alice) {
if (err) {
w.abort();
return void console.log(err);
}
alice = _alice;
alice.name = 'alice';
}));
/*
createUser(sharedConfig, w(function (err, _bob) {
if (err) {
w.abort();
return void console.log(err);
}
bob = _bob;
bob.name = 'bob';
}));*/
}).nThen(function (w) {
var i = 0;
var next = w();
state.hashes = [];
var send = function () {
if (i++ >= 160) { return next(); }
var msg = alice.mailbox.encrypt(JSON.stringify({
pewpew: 'bangbang',
}), alice.curveKeys.curvePublic);
var hash = msg.slice(0, 64);
state.hashes.push(hash);
alice.anonRpc.send('WRITE_PRIVATE_MESSAGE', [
alice.mailboxChannel,
msg
//Nacl.util.encodeBase64(Nacl.randomBytes(128))
], w(function (err) {
if (err) { throw new Error(err); }
console.log('message %s written successfully', i);
setTimeout(send, 15);
}));
};
send();
}).nThen(function (w) {
console.log("Connecting with second user");
createUser({
lastKnownHash: state.hashes[55],
}, w(function (err, _alice) {
if (err) {
w.abort();
console.log("lastKnownHash: ", state.hashes[55]);
console.log(err);
process.exit(1);
//return void console.log(err);
}
var user = state.alice2 = _alice;
if (user.messages.length === 105) {
process.exit(0);
}
//console.log(user.messages, user.messages.length);
process.exit(1);
}));
}).nThen(function () {
}).nThen(function () {
alice.cleanup();
//bob.cleanup();
});

46
scripts/tests/test-pins.js

@ -0,0 +1,46 @@
/*jshint esversion: 6 */
const Pins = require("../../lib/pins");
var stats = {
users: 0,
lines: 0, // how many lines did you iterate over
surplus: 0, // how many of those lines were not needed?
pinned: 0, // how many files are pinned?
duplicated: 0,
};
var handler = function (ref, id /* safeKey */, pinned) {
if (ref.surplus) {
//console.log("%s has %s trimmable lines", id, ref.surplus);
stats.surplus += ref.surplus;
}
for (var item in ref.pins) {
if (!pinned.hasOwnProperty(item)) {
//console.log("> %s is pinned", item);
stats.pinned++;
} else {
//console.log("> %s was already pinned", item);
stats.duplicated++;
}
}
stats.users++;
stats.lines += ref.index;
//console.log(ref, id);
};
Pins.list(function (err) {
if (err) { return void console.error(err); }
/*
for (var id in pinned) {
console.log(id);
stats.pinned++;
}
*/
console.log(stats);
}, {
pinPath: require("../../lib/load-config").pinPath,
handler: handler,
});

41
scripts/tests/test-plan.js

@ -0,0 +1,41 @@
/*jshint esversion: 6 */
const Plan = require("../../lib/plan");
var rand_delay = function (f) {
setTimeout(f, Math.floor(Math.random() * 1500) + 250);
};
var plan = Plan(6).job(1, function (next) {
[1,2,3,4,5,6,7,8,9,10,11,12].forEach(function (n) {
plan.job(0, function (next) {
rand_delay(function () {
console.log("finishing job %s", n);
next();
});
});
});
console.log("finishing job 0");
next();
}).job(2, function (next) {
console.log("finishing job 13");
[
100,
200,
300,
400
].forEach(function (n) {
plan.job(3, function (next) {
rand_delay(function () {
console.log("finishing job %s", n);
next();
});
});
});
next();
}).done(function () { console.log("DONE"); }).start();
//console.log(plan);
//plan.start();

183
scripts/tests/test-rpc.js

@ -159,6 +159,13 @@ var createUser = function (config, cb) {
}
wc.leave();
}));
}).nThen(function (w) {
// FIXME give the server time to write your mailbox data before checking that it's correct
// chainpad-server sends an ACK before the channel has actually been created
// causing you to think that everything is good.
// without this timeout the GET_METADATA rpc occasionally returns before
// the metadata has actually been written to the disk.
setTimeout(w(), 500);
}).nThen(function (w) {
// confirm that you own your mailbox
user.anonRpc.send("GET_METADATA", user.mailboxChannel, w(function (err, data) {
@ -227,6 +234,18 @@ var createUser = function (config, cb) {
return void cb(err);
}
}));
}).nThen(function (w) {
// some basic sanity checks...
user.rpc.getServerHash(w(function (err, hash) {
if (err) {
w.abort();
return void cb(err);
}
if (hash !== EMPTY_ARRAY_HASH) {
console.error("EXPECTED EMPTY ARRAY HASH");
process.exit(1);
}
}));
}).nThen(function () {
user.cleanup = function (cb) {
@ -338,9 +357,154 @@ nThen(function (w) {
bob.name = 'bob';
//console.log("Initialized Bob");
}));
}).nThen(function (w) {
// restrict access to oscar's mailbox channel
oscar.rpc.send('SET_METADATA', {
command: 'RESTRICT_ACCESS',
channel: oscar.mailboxChannel,
value: [ true ]
}, w(function (err, response) {
if (err) {
return void console.log(err);
}
var metadata = response[0];
if (!(metadata && metadata.restricted)) {
throw new Error("EXPECTED MAILBOX TO BE RESTRICTED");
}
}));
}).nThen(function (w) {
alice.anonRpc.send('GET_METADATA', oscar.mailboxChannel, w(function (err, response) {
if (!response) { throw new Error("EXPECTED RESPONSE"); }
var metadata = response[0];
var expected_fields = ['restricted', 'allowed', 'rejected'];
for (var key in metadata) {
if (expected_fields.indexOf(key) === -1) {
console.log(metadata);
throw new Error("EXPECTED METADATA TO BE RESTRICTED");
}
}
}));
}).nThen(function (w) {
alice.anonRpc.send('WRITE_PRIVATE_MESSAGE', [
oscar.mailboxChannel,
'["VANDALISM"]',
], w(function (err) {
if (err !== 'INSUFFICIENT_PERMISSIONS') {
throw new Error("EXPECTED INSUFFICIENT PERMISSIONS ERROR");
}
}));
}).nThen(function (w) {
// add alice to oscar's mailbox's allow list for some reason
oscar.rpc.send('SET_METADATA', {
command: 'ADD_ALLOWED',
channel: oscar.mailboxChannel,
value: [
alice.edKeys.edPublic
]
}, w(function (err, response) {
var metadata = response && response[0];
if (!metadata || !Array.isArray(metadata.allowed) ||
metadata.allowed.indexOf(alice.edKeys.edPublic) === -1) {
throw new Error("EXPECTED ALICE TO BE IN THE ALLOW LIST");
}
}));
}).nThen(function (w) {
oscar.anonRpc.send('GET_METADATA', oscar.mailboxChannel, w(function (err, response) {
if (err) {
throw new Error("OSCAR SHOULD BE ABLE TO READ HIS OWN METADATA");
}
var metadata = response && response[0];
if (!metadata) {
throw new Error("EXPECTED METADATA");
}
if (metadata.allowed[0] !== alice.edKeys.edPublic) {
throw new Error("EXPECTED ALICE TO BE ON ALLOW LIST");
}
}));
}).nThen(function () {
//setTimeout(w(), 500);
alice.anonRpc.send('GET_METADATA', oscar.mailboxChannel, function (err, response) {
var metadata = response && response[0];
if (!metadata || !metadata.restricted || !metadata.channel) {
throw new Error("EXPECTED FULL ACCESS TO CHANNEL METADATA");
}
});
}).nThen(function (w) {
//throw new Error("boop");
// add alice as an owner of oscar's mailbox for some reason
oscar.rpc.send('SET_METADATA', {
command: 'ADD_OWNERS',
channel: oscar.mailboxChannel,
value: [
alice.edKeys.edPublic
]
}, Util.mkTimeout(w(function (err) {
if (err === 'TIMEOUT') {
throw new Error(err);
}
if (err) {
throw new Error("ADD_OWNERS_FAILURE");
}
}), 2000));
}).nThen(function (w) {
// alice should now be able to read oscar's mailbox metadata
alice.anonRpc.send('GET_METADATA', oscar.mailboxChannel, w(function (err, response) {
if (err) {
throw new Error("EXPECTED ALICE TO BE ALLOWED TO READ OSCAR'S METADATA");
}
var metadata = response && response[0];
if (!metadata) { throw new Error("EXPECTED METADATA"); }
if (metadata.allowed.length !== 0) {
throw new Error("EXPECTED AN EMPTY ALLOW LIST");
}
}));
}).nThen(function (w) {
// disable the access restriction
oscar.rpc.send('SET_METADATA', {
command: 'RESTRICT_ACCESS',
channel: oscar.mailboxChannel,
value: [
false
]
}, w(function (err) {
if (err) {
throw new Error("COULD_NOT_DISABLE_RESTRICTED_ACCESS");
}
}));
// add bob to oscar's mailbox's allow list for some reason
oscar.rpc.send('SET_METADATA', {
command: 'ADD_ALLOWED',
channel: oscar.mailboxChannel,
value: [
bob.edKeys.edPublic
]
}, w(function (err) {
if (err) {
return void console.error(err);
}
}));
}).nThen(function (w) {
oscar.anonRpc.send('GET_METADATA', oscar.mailboxChannel, w(function (err, response) {
if (err) {
throw new Error("OSCAR SHOULD BE ABLE TO READ HIS OWN METADATA");
}
var metadata = response && response[0];
if (!metadata) {
throw new Error("EXPECTED METADATA");
}
if (metadata.allowed[0] !== bob.edKeys.edPublic) {
throw new Error("EXPECTED ALICE TO BE ON ALLOW LIST");
}
if (metadata.restricted) {
throw new Error("RESTRICTED_ACCESS_NOT_DISABLED");
}
}));
}).nThen(function () {
//setTimeout(w(), 500);
}).nThen(function (w) {
// Alice loads the roster...
var rosterKeys = Crypto.Team.deriveMemberKeys(sharedConfig.rosterSeed, alice.curveKeys);
@ -491,7 +655,7 @@ nThen(function (w) {
console.error("checkpoint by member failed as expected");
}));
}).nThen(function (w) {
console.log("STATE =", JSON.stringify(oscar.roster.getState(), null, 2));
//console.log("STATE =", JSON.stringify(oscar.roster.getState(), null, 2));
// oscar describes the team
oscar.roster.metadata({
@ -499,7 +663,7 @@ nThen(function (w) {
topic: "pewpewpew",
}, w(function (err) {
if (err) { return void console.log(err); }
console.log("STATE =", JSON.stringify(oscar.roster.getState(), null, 2));
//console.log("STATE =", JSON.stringify(oscar.roster.getState(), null, 2));
}));
}).nThen(function (w) {
// oscar sends a checkpoint
@ -554,6 +718,7 @@ nThen(function (w) {
}));
}).nThen(function (w) {
oscar.roster.checkpoint(w(function (err) {
oscar.lastRosterCheckpointHash = oscar.roster.getLastCheckpointHash(); // FIXME bob should connect to this to avoid extra messages
if (!err) { return; }
console.error("Checkpoint by an owner failed unexpectedly");
console.error(err);
@ -578,21 +743,21 @@ nThen(function (w) {
channel: rosterKeys.channel,
keys: rosterKeys,
anon_rpc: bob.anonRpc,
lastKnownHash: oscar.lastKnownHash,
//lastKnownHash: oscar.lastRosterCheckpointHash
//lastKnownHash: oscar.lastKnownHash, // FIXME this doesn't work. off-by-one?
}, w(function (err, roster) {
if (err) {
w.abort();
return void console.trace(err);
}
bob.roster = roster;
if (JSON.stringify(bob.roster.getState()) !== JSON.stringify(oscar.roster.getState())) {
console.log("BOB AND OSCAR DO NOT HAVE THE SAME STATE");
//console.log("BOB AND OSCAR DO NOT HAVE THE SAME STATE");
console.log("BOB =", JSON.stringify(bob.roster.getState(), null, 2));
console.log("OSCAR =", JSON.stringify(oscar.roster.getState(), null, 2));
throw new Error("BOB AND OSCAR DO NOT HAVE THE SAME STATE");
}
bob.destroy.reg(function () {
roster.stop();
});
@ -639,8 +804,8 @@ nThen(function (w) {
bob.roster.describe(data, w(function (err) {
if (err) {
console.error("self-description by a member failed unexpectedly");
process.exit(1);
console.error(err);
throw new Error("self-description by a member failed unexpectedly");
}
}));
}).nThen(function (w) {

220
scripts/tests/test-scheduler.js

@ -0,0 +1,220 @@
/* three types of actions:
 *   read
 *   write
 *   append
 * each of which takes a random amount of time
 */
var Util = require("../../lib/common-util");
var schedule = require("../../lib/schedule")();
var nThen = require("nthen");
var rand = function (n) {
return Math.floor(Math.random() * n);
};
var rand_time = function () {
// between 25 and 324 (ms)
return rand(300) + 25;
};
var makeAction = function (type) {
var i = 0;
return function (time) {
var j = i++;
return function (next) {
console.log(" Beginning action: %s#%s", type, j);
setTimeout(function () {
console.log(" Completed action: %s#%s", type, j);
next();
}, time);
return j;
};
};
};
var TYPES = ['WRITE', 'READ', 'APPEND'];
var chooseAction = function () {
var n = rand(100);
if (n < 50) { return 'APPEND'; }
if (n < 90) { return 'READ'; }
return 'WRITE';
//return TYPES[rand(3)];
};
var test = function (script, cb) {
var uid = Util.uid();
var TO_RUN = script.length;
var total_run = 0;
var parallel = 0;
var last_run_ordered = -1;
//var i = 0;
var ACTIONS = {};
TYPES.forEach(function (type) {
ACTIONS[type] = makeAction(type);
});
nThen(function (w) {
setTimeout(w(), 3000);
// run scripted actions with assertions
script.forEach(function (scene) {
var type = scene[0];
var time = typeof(scene[1]) === 'number'? scene[1]: rand_time();
var action = ACTIONS[type](time);
console.log("Queuing action of type: %s(%s)", type, time);
var proceed = w();
switch (type) {
case 'APPEND':
return schedule.ordered(uid, w(function (next) {
parallel++;
var temp = action(function () {
parallel--;
total_run++;
proceed();
next();
});
if (temp !== (last_run_ordered + 1)) {
throw new Error("out of order");
}
last_run_ordered = temp;
}));
case 'WRITE':
return schedule.blocking(uid, w(function (next) {
parallel++;
action(function () {
parallel--;
total_run++;
proceed();
next();
});
if (parallel > 1) {
console.log("parallelism === %s", parallel);
throw new Error("too much parallel");
}
}));
case 'READ':
return schedule.unordered(uid, w(function (next) {
parallel++;
action(function () {
parallel--;
total_run++;
proceed();
next();
});
}));
default:
throw new Error("wut");
}
});
}).nThen(function () {
// make assertions about the whole script
if (total_run !== TO_RUN) {
console.log("Ran %s / %s", total_run, TO_RUN);
throw new Error("skipped tasks");
}
console.log("total_run === %s", total_run);
cb();
});
};
var randomScript = function () {
var len = rand(15) + 10;
var script = [];
while (len--) {
script.push([
chooseAction(),
rand_time(),
]);
}
return script;
};
var WRITE = function (t) {
return ['WRITE', t];
};
var READ = function (t) {
return ['READ', t];
};
var APPEND = function (t) {
return ['APPEND', t];
};
nThen(function (w) {
test([
['READ', 150],
['APPEND', 200],
['APPEND', 100],
['READ', 350],
['WRITE', 400],
['APPEND', 275],
['APPEND', 187],
['WRITE', 330],
['WRITE', 264],
['WRITE', 256],
], w(function () {
console.log("finished pre-scripted test\n");
}));
}).nThen(function (w) {
test([
WRITE(289),
APPEND(281),
READ(207),
WRITE(225),
READ(279),
WRITE(300),
READ(331),
APPEND(341),
APPEND(385),
READ(313),
WRITE(285),
READ(304),
APPEND(273),
APPEND(150),
WRITE(246),
READ(244),
WRITE(172),
APPEND(253),
READ(215),
READ(296),
APPEND(281),
APPEND(296),
WRITE(168),
], w(function () {
console.log("finished 2nd pre-scripted test\n");
}));
}).nThen(function () {
var totalTests = 50;
var randomTests = 1;
var last = nThen(function () {
console.log("beginning randomized tests");
});
var queueRandomTest = function (i) {
last = last.nThen(function (w) {
console.log("running random test script #%s\n", i);
test(randomScript(), w(function () {
console.log("finished random test #%s\n", i);
}));
});
};
while (randomTests <= totalTests) { queueRandomTest(randomTests++); }
last.nThen(function () {
console.log("finished %s random tests", totalTests);
});
});

295
server.js

@ -4,17 +4,14 @@
var Express = require('express');
var Http = require('http');
var Fs = require('fs');
var WebSocketServer = require('ws').Server;
var NetfluxSrv = require('chainpad-server/NetfluxWebsocketSrv');
var Package = require('./package.json');
var Path = require("path");
var nThen = require("nthen");
var Util = require("./lib/common-util");
var Default = require("./lib/defaults");
var config = require("./lib/load-config");
// support multiple storage back ends
var Storage = require('./storage/file');
var app = Express();
// mode can be FRESH (default), DEV, or PACKAGE
@ -39,7 +36,53 @@ if (process.env.PACKAGE) {
FRESH_KEY = +new Date();
}
(function () {
// you absolutely must provide an 'httpUnsafeOrigin'
if (typeof(config.httpUnsafeOrigin) !== 'string') {
throw new Error("No 'httpUnsafeOrigin' provided");
}
config.httpUnsafeOrigin = config.httpUnsafeOrigin.trim();
if (typeof(config.httpSafeOrigin) === 'string') {
config.httpSafeOrigin = config.httpSafeOrigin.trim().replace(/\/$/, '');
}
// fall back to listening on a local address
// if httpAddress is not a string
if (typeof(config.httpAddress) !== 'string') {
config.httpAddress = '127.0.0.1';
}
// listen on port 3000 if a valid port number was not provided
if (typeof(config.httpPort) !== 'number' || config.httpPort > 65535) {
config.httpPort = 3000;
}
if (typeof(config.httpSafeOrigin) !== 'string') {
if (typeof(config.httpSafePort) !== 'number') {
config.httpSafePort = config.httpPort + 1;
}
if (DEV_MODE) { return; }
console.log(`
m m mm mmmmm mm m mmmmm mm m mmm m
# # # ## # "# #"m # # #"m # m" " #
" #"# # # # #mmmm" # #m # # # #m # # mm #
## ##" #mm# # "m # # # # # # # # #
# # # # # " # ## mm#mm # ## "mmm" #
`);
console.log("\nNo 'httpSafeOrigin' provided.");
console.log("Your configuration probably isn't taking advantage of all of CryptPad's security features!");
console.log("This is acceptable for development, otherwise your users may be at risk.\n");
console.log("Serving sandboxed content via port %s.\nThis is probably not what you want for a production instance!\n", config.httpSafePort);
}
}());
var configCache = {};
config.flushCache = function () {
configCache = {};
FRESH_KEY = +new Date();
if (!(DEV_MODE || FRESH_MODE)) { FRESH_MODE = true; }
if (!config.log) { return; }
@ -49,11 +92,21 @@ config.flushCache = function () {
const clone = (x) => (JSON.parse(JSON.stringify(x)));
var setHeaders = (function () {
if (typeof(config.httpHeaders) !== 'object') { return function () {}; }
// load the default http headers unless the admin has provided their own via the config file
var headers;
var custom = config.httpHeaders;
// if the admin provided valid http headers then use them
if (custom && typeof(custom) === 'object' && !Array.isArray(custom)) {
headers = clone(custom);
} else {
// otherwise use the default
headers = Default.httpHeaders();
}
const headers = clone(config.httpHeaders);
if (config.contentSecurity) {
headers['Content-Security-Policy'] = clone(config.contentSecurity);
// next define the base Content Security Policy (CSP) headers
if (typeof(config.contentSecurity) === 'string') {
headers['Content-Security-Policy'] = config.contentSecurity;
if (!/;$/.test(headers['Content-Security-Policy'])) { headers['Content-Security-Policy'] += ';' }
if (headers['Content-Security-Policy'].indexOf('frame-ancestors') === -1) {
// backward compat for those who do not merge the new version of the config
@ -61,19 +114,25 @@ var setHeaders = (function () {
// It also fixes the cross-domain iframe.
headers['Content-Security-Policy'] += "frame-ancestors *;";
}
} else {
// use the default CSP headers constructed with your domain
headers['Content-Security-Policy'] = Default.contentSecurity(config.httpUnsafeOrigin);
}
const padHeaders = clone(headers);
if (config.padContentSecurity) {
padHeaders['Content-Security-Policy'] = clone(config.padContentSecurity);
if (typeof(config.padContentSecurity) === 'string') {
padHeaders['Content-Security-Policy'] = config.padContentSecurity;
} else {
padHeaders['Content-Security-Policy'] = Default.padContentSecurity(config.httpUnsafeOrigin);
}
if (Object.keys(headers).length) {
return function (req, res) {
const h = [
/^\/pad(2)?\/inner\.html.*/,
/^\/sheet\/inner\.html.*/,
/^\/common\/onlyoffice\/.*\/index\.html.*/
/^\/pad\/inner\.html.*/,
/^\/common\/onlyoffice\/.*\/index\.html.*/,
/^\/(sheet|ooslide|oodoc)\/inner\.html.*/,
].some((regex) => {
return regex.test(req.url)
return regex.test(req.url);
}) ? padHeaders : headers;
for (let header in h) { res.setHeader(header, h[header]); }
};
@ -115,15 +174,10 @@ app.use(function (req, res, next) {
app.use(Express.static(__dirname + '/www'));
Fs.exists(__dirname + "/customize", function (e) {
if (e) { return; }
console.log("Cryptpad is customizable, see customize.dist/readme.md for details");
});
// FIXME I think this is a regression caused by a recent PR
// correct this hack without breaking the contributor's intended behaviour.
var mainPages = config.mainPages || ['index', 'privacy', 'terms', 'about', 'contact'];
var mainPages = config.mainPages || Default.mainPages();
var mainPagePattern = new RegExp('^\/(' + mainPages.join('|') + ').html$');
app.get(mainPagePattern, Express.static(__dirname + '/customize'));
app.get(mainPagePattern, Express.static(__dirname + '/customize.dist'));
@ -153,38 +207,74 @@ try {
});
} catch (e) { console.error("Can't parse admin keys"); }
// TODO: cache these /api/config responses instead of re-computing them each time
app.get('/api/config', function(req, res){
// TODO precompute any data that isn't dynamic to save some CPU time
var host = req.headers.host.replace(/\:[0-9]+/, '');
res.setHeader('Content-Type', 'text/javascript');
res.send('define(function(){\n' + [
'var obj = ' + JSON.stringify({
requireConf: {
waitSeconds: 600,
urlArgs: 'ver=' + Package.version + (FRESH_KEY? '-' + FRESH_KEY: '') + (DEV_MODE? '-' + (+new Date()): ''),
},
removeDonateButton: (config.removeDonateButton === true),
allowSubscriptions: (config.allowSubscriptions === true),
websocketPath: config.externalWebsocketURL,
httpUnsafeOrigin: config.httpUnsafeOrigin.replace(/^\s*/, ''),
adminEmail: config.adminEmail,
adminKeys: admins,
inactiveTime: config.inactiveTime,
supportMailbox: config.supportMailboxPublicKey
}, null, '\t'),
'obj.httpSafeOrigin = ' + (function () {
if (config.httpSafeOrigin) { return '"' + config.httpSafeOrigin + '"'; }
if (config.httpSafePort) {
return "(function () { return window.location.origin.replace(/\:[0-9]+$/, ':" +
config.httpSafePort + "'); }())";
}
return 'window.location.origin';
}()),
'return obj',
'});'
].join(';\n'));
});
var serveConfig = (function () {
// if dev mode: never cache
var cacheString = function () {
return (FRESH_KEY? '-' + FRESH_KEY: '') + (DEV_MODE? '-' + (+new Date()): '');
};
var template = function (host) {
return [
'define(function(){',
'var obj = ' + JSON.stringify({
requireConf: {
waitSeconds: 600,
urlArgs: 'ver=' + Package.version + cacheString(),
},
removeDonateButton: (config.removeDonateButton === true),
allowSubscriptions: (config.allowSubscriptions === true),
websocketPath: config.externalWebsocketURL,
httpUnsafeOrigin: config.httpUnsafeOrigin,
adminEmail: config.adminEmail,
adminKeys: admins,
inactiveTime: config.inactiveTime,
supportMailbox: config.supportMailboxPublicKey,
maxUploadSize: config.maxUploadSize,
premiumUploadSize: config.premiumUploadSize,
}, null, '\t'),
'obj.httpSafeOrigin = ' + (function () {
if (config.httpSafeOrigin) { return '"' + config.httpSafeOrigin + '"'; }
if (config.httpSafePort) {
return "(function () { return window.location.origin.replace(/\:[0-9]+$/, ':" +
config.httpSafePort + "'); }())";
}
return 'window.location.origin';
}()),
'return obj',
'});'
].join(';\n')
};
var cleanUp = {};
return function (req, res) {
var host = req.headers.host.replace(/\:[0-9]+/, '');
res.setHeader('Content-Type', 'text/javascript');
// don't cache anything if you're in dev mode
if (DEV_MODE) {
return void res.send(template(host));
}
// generate a lookup key for the cache
var cacheKey = host + ':' + cacheString();
// if there's nothing cached for that key...
if (!configCache[cacheKey]) {
// generate the response and cache it in memory
configCache[cacheKey] = template(host);
// and create a function to conditionally evict cache entries
// which have not been accessed in the last 20 seconds
cleanUp[cacheKey] = Util.throttle(function () {
delete cleanUp[cacheKey];
delete configCache[cacheKey];
}, 20000);
}
// successive calls to this function reset the eviction timer
cleanUp[cacheKey]();
return void res.send(configCache[cacheKey]);
};
}());
app.get('/api/config', serveConfig);
var four04_path = Path.resolve(__dirname + '/customize.dist/404.html');
var custom_four04_path = Path.resolve(__dirname + '/customize/404.html');
@ -205,81 +295,36 @@ app.use(function (req, res, next) {
var httpServer = Http.createServer(app);
httpServer.listen(config.httpPort,config.httpAddress,function(){
var host = config.httpAddress;
var hostName = !host.indexOf(':') ? '[' + host + ']' : host;
var port = config.httpPort;
var ps = port === 80? '': ':' + port;
console.log('[%s] server available http://%s%s', new Date().toISOString(), hostName, ps);
});
if (config.httpSafePort) {
Http.createServer(app).listen(config.httpSafePort, config.httpAddress);
}
var wsConfig = { server: httpServer };
nThen(function (w) {
Fs.exists(__dirname + "/customize", w(function (e) {
if (e) { return; }
console.log("Cryptpad is customizable, see customize.dist/readme.md for details");
}));
}).nThen(function (w) {
httpServer.listen(config.httpPort,config.httpAddress,function(){
var host = config.httpAddress;
var hostName = !host.indexOf(':') ? '[' + host + ']' : host;
var rpc;
var historyKeeper;
var port = config.httpPort;
var ps = port === 80? '': ':' + port;
var log;
console.log('[%s] server available http://%s%s', new Date().toISOString(), hostName, ps);
});
// Initialize logging, then the store, then tasks, then rpc, then the history keeper, and then start the server
var nt = nThen(function (w) {
// set up logger
var Logger = require("./lib/log");
//console.log("Loading logging module");
Logger.create(config, w(function (_log) {
log = config.log = _log;
}));
}).nThen(function (w) {
if (config.externalWebsocketURL) {
// if you plan to use an external websocket server
// then you don't need to load any API services other than the logger.
// Just abort.
w.abort();
return;
if (config.httpSafePort) {
Http.createServer(app).listen(config.httpSafePort, config.httpAddress, w());
}
Storage.create(config, w(function (_store) {
config.store = _store;
}));
}).nThen(function (w) {
var Tasks = require("./storage/tasks");
Tasks.create(config, w(function (e, tasks) {
if (e) {
throw e;
}
config.tasks = tasks;
if (config.disableIntegratedTasks) { return; }
setInterval(function () {
tasks.runAll(function (err) {
if (err) {
// either TASK_CONCURRENCY or an error with tasks.list
// in either case it is already logged.
}
});
}, 1000 * 60 * 5); // run every five minutes
}));
}).nThen(function (w) {
require("./rpc").create(config, w(function (e, _rpc) {
if (e) {
w.abort();
throw e;
}
rpc = _rpc;
}));
}).nThen(function () {
var HK = require('./historyKeeper.js');
var hkConfig = {
tasks: config.tasks,
rpc: rpc,
store: config.store,
log: log,
retainData: Boolean(config.retainData),
};
historyKeeper = HK.create(hkConfig);
}).nThen(function () {
var wsSrv = new WebSocketServer(wsConfig);
NetfluxSrv.run(wsSrv, config, historyKeeper);
var wsConfig = { server: httpServer };
// Initialize logging then start the API server
require("./lib/log").create(config, function (_log) {
config.log = _log;
config.httpServer = httpServer;
if (config.externalWebsocketURL) { return; }
require("./lib/api").create(config);
});
});

59
storage/README.md

@ -1,59 +0,0 @@
# Storage Mechanisms
Cryptpad's message API is quite simple and modular, and it isn't especially difficult to write alternative modules that employ your favourite datastore.
There are a few guidelines for creating a module:
Dependencies for your storage engine **should not** be added to Cryptpad.
Instead, write an adaptor, and place it in `cryptpad/storage/yourAdaptor.js`.
Alternatively, storage adaptors can be published to npm, and required from your config (once installed).
## Your adaptor should conform to a simple API.
It must export an object with a single property, `create`, which is a function.
That function must accept two arguments:
1. an object containing configuration values
- any configuration values that you require should be well documented
- they should also be named carefully so as to avoid collisions with other modules
2. a callback
- this callback is used to return an object with (currently) two methods
- even if your storage mechanism can be executed synchronously, we use the callback pattern for portability.
## Methods
### message(channelName, content, handler)
When Cryptpad receives a message, it saves it into the datastore under the message's channel name (or the datastore's equivalent of a table), and then relays the message to every other client participating in the same channel.
Relaying logic exists outside of the storage module, you simply need to store the message then execute the handler on success.
### getMessages(channelName, handler, callback)
When a new client joins, they request the entire history of messages for a particular channel.
This method retrieves those messages and delivers them in order.
In practice, out-of-order messages make your clientside application more likely to fail; however, they are generally tolerated.
As a channel accumulates a greater number of messages, the likelihood of the application receiving them in the wrong order becomes greater.
This results in older sessions becoming less reliable.
This function accepts the name of the channel in which the user is interested, the handler for each message, and the callback to be executed when the last message has been fetched and handled.
**Note**, the callback is a new addition to this API.
It is only implemented within the leveldb adaptor, making our latest code incompatible with the other back ends.
While we migrate to our new Netflux API, only the leveldb adaptor will be supported.
### removeChannel(channelName, callback)
This method is called (optionally, see config.example.js for more info) some amount of time after the last client in a channel disconnects.
It should remove any history of that channel, and execute a callback which takes an error message as an argument.
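As a rough illustration of the shape described above (not a supported back end: the in-memory store, the module name, and the exact callback signature are assumptions for this sketch), an adaptor might look like:

```js
// cryptpad/storage/yourAdaptor.js -- illustrative only; data is lost on restart
module.exports.create = function (conf, cb) {
    var channels = {}; // channelName -> ordered list of stored messages
    cb({
        message: function (channelName, content, handler) {
            (channels[channelName] = channels[channelName] || []).push(content);
            handler(); // relaying is handled outside of the storage module
        },
        getMessages: function (channelName, handler, callback) {
            (channels[channelName] || []).forEach(function (msg) { handler(msg); });
            if (typeof(callback) === 'function') { callback(); } // the newer callback argument
        },
        removeChannel: function (channelName, callback) {
            delete channels[channelName];
            callback(void 0); // the callback takes an error as its argument
        },
    });
};
```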
## Documenting your adaptor
Naturally, you should comment your code well before making a PR.
Failing that, you should definitely add notes to `cryptpad/config.example.js` such that people who wish to install your adaptor know how to do so.
Include notes on how to install the back end, how to install the client used to connect to it (as is the case with many datastores), and how to configure Cryptpad to use your adaptor.
The current configuration file should serve as an example of what to add, and how to comment.

628
storage/blob.js

@ -1,628 +0,0 @@
/* globals Buffer */
var Fs = require("fs");
var Fse = require("fs-extra");
var Path = require("path");
var BlobStore = module.exports;
var nThen = require("nthen");
var Semaphore = require("saferphore");
var Util = require("../lib/common-util");
var isValidSafeKey = function (safeKey) {
return typeof(safeKey) === 'string' && !/\//.test(safeKey) && safeKey.length === 44;
};
var isValidId = function (id) {
return typeof(id) === 'string' && id.length === 48 && !/[^a-f0-9]/.test(id);
};
// helpers
var prependArchive = function (Env, path) {
return Path.join(Env.archivePath, path);
};
// /blob/<blobPrefix>/<blobId>
var makeBlobPath = function (Env, blobId) {
return Path.join(Env.blobPath, blobId.slice(0, 2), blobId);
};
// /blobstate/<safeKeyPrefix>/<safeKey>
var makeStagePath = function (Env, safeKey) {
return Path.join(Env.blobStagingPath, safeKey.slice(0, 2), safeKey);
};
// /blob/<safeKeyPrefix>/<safeKey>/<blobPrefix>/<blobId>
var makeProofPath = function (Env, safeKey, blobId) {
return Path.join(Env.blobPath, safeKey.slice(0, 3), safeKey, blobId.slice(0, 2), blobId);
};
var parseProofPath = function (path) {
var parts = path.split('/');
return {
blobId: parts[parts.length -1],
safeKey: parts[parts.length - 3],
};
};
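// Illustration (added comment; ids shortened and invented):
//   makeBlobPath(Env, 'ab12...')              -> <blobPath>/ab/ab12...
//   makeStagePath(Env, 'Xyz9...')             -> <blobStagingPath>/Xy/Xyz9...
//   makeProofPath(Env, 'Xyz9...', 'ab12...')  -> <blobPath>/Xyz/Xyz9.../ab/ab12...
// parseProofPath() reads the safeKey and blobId back out of such a proof path.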
// getUploadSize: used by
// getFileSize
var getUploadSize = function (Env, blobId, cb) {
var path = makeBlobPath(Env, blobId);
if (!path) { return cb('INVALID_UPLOAD_ID'); }
Fs.stat(path, function (err, stats) {
if (err) {
// if a file was deleted, its size is 0 bytes
if (err.code === 'ENOENT') { return cb(void 0, 0); }
return void cb(err.code);
}
cb(void 0, stats.size);
});
};
// isFile: used by
// removeOwnedBlob
// uploadComplete
// uploadStatus
var isFile = function (filePath, cb) {
Fs.stat(filePath, function (e, stats) {
if (e) {
if (e.code === 'ENOENT') { return void cb(void 0, false); }
return void cb(e.message);
}
return void cb(void 0, stats.isFile());
});
};
var makeFileStream = function (full, _cb) {
var cb = Util.once(Util.mkAsync(_cb));
Fse.mkdirp(Path.dirname(full), function (e) {
if (e || !full) { // !full for pleasing flow, it's already checked
return void cb(e ? e.message : 'INTERNAL_ERROR');
}
try {
var stream = Fs.createWriteStream(full, {
flags: 'a',
encoding: 'binary',
highWaterMark: Math.pow(2, 16),
});
stream.on('open', function () {
cb(void 0, stream);
});
stream.on('error', function (err) {
cb(err);
});
} catch (err) {
cb('BAD_STREAM');
}
});
};
/********** METHODS **************/
var upload = function (Env, safeKey, content, cb) {
var dec;
try { dec = Buffer.from(content, 'base64'); }
catch (e) { return void cb('DECODE_BUFFER'); }
var len = dec.length;
var session = Env.getSession(safeKey);
if (typeof(session.currentUploadSize) !== 'number' ||
typeof(session.pendingUploadSize) !== 'number') {
// improperly initialized... maybe they didn't check before uploading?
// reject it, just in case
return cb('NOT_READY');
}
if (session.currentUploadSize > session.pendingUploadSize) {
return cb('E_OVER_LIMIT');
}
var stagePath = makeStagePath(Env, safeKey);
if (!session.blobstage) {
makeFileStream(stagePath, function (e, stream) {
if (!stream) { return void cb(e); }
var blobstage = session.blobstage = stream;
blobstage.write(dec);
session.currentUploadSize += len;
cb(void 0, dec.length);
});
} else {
session.blobstage.write(dec);
session.currentUploadSize += len;
cb(void 0, dec.length);
}
};
// upload_cancel
var upload_cancel = function (Env, safeKey, fileSize, cb) {
var session = Env.getSession(safeKey);
session.pendingUploadSize = fileSize;
session.currentUploadSize = 0;
if (session.blobstage) {
session.blobstage.close();
delete session.blobstage;
}
var path = makeStagePath(Env, safeKey);
Fs.unlink(path, function (e) {
if (e) { return void cb('E_UNLINK'); }
cb(void 0);
});
};
// upload_complete
var upload_complete = function (Env, safeKey, id, cb) {
var session = Env.getSession(safeKey);
if (session.blobstage && session.blobstage.close) {
session.blobstage.close();
delete session.blobstage;
}
var oldPath = makeStagePath(Env, safeKey);
var newPath = makeBlobPath(Env, id);
nThen(function (w) {
// make sure the path to your final location exists
Fse.mkdirp(Path.dirname(newPath), function (e) {
if (e) {
w.abort();
return void cb('RENAME_ERR');
}
});
}).nThen(function (w) {
// make sure there's not already something in that exact location
isFile(newPath, function (e, yes) {
if (e) {
w.abort();
return void cb(e);
}
if (yes) {
w.abort();
return void cb('RENAME_ERR');
}
cb(void 0, newPath, id);
});
}).nThen(function () {
// finally, move the old file to the new path
// FIXME we could just move and handle the EEXISTS instead of the above block
Fse.move(oldPath, newPath, function (e) {
if (e) { return void cb('RENAME_ERR'); }
cb(void 0, id);
});
});
};
var tryId = function (path, cb) {
Fs.access(path, Fs.constants.R_OK | Fs.constants.W_OK, function (e) {
if (!e) {
// generate a new id (with the same prefix) and recurse
//WARN('ownedUploadComplete', 'id is already used '+ id);
return void cb('EEXISTS');
} else if (e.code === 'ENOENT') {
// no entry, so it's safe for us to proceed
return void cb();
} else {
// it failed in an unexpected way. log it
//WARN('ownedUploadComplete', e);
return void cb(e.code);
}
});
};
// owned_upload_complete
var owned_upload_complete = function (Env, safeKey, id, cb) {
var session = Env.getSession(safeKey);
// the file has already been uploaded to the staging area
// close the pending writestream
if (session.blobstage && session.blobstage.close) {
session.blobstage.close();
delete session.blobstage;
}
if (!isValidId(id)) {
//WARN('ownedUploadComplete', "id is invalid");
return void cb('EINVAL_ID');
}
var oldPath = makeStagePath(Env, safeKey);
if (typeof(oldPath) !== 'string') {
return void cb('EINVAL_CONFIG');
}
var finalPath = makeBlobPath(Env, id);
var finalOwnPath = makeProofPath(Env, safeKey, id);
// the user wants to move it into blob and create an empty file with the same id
// in their own space:
// /blob/safeKeyPrefix/safeKey/blobPrefix/blobID
nThen(function (w) {
// make the requisite directory structure using Mkdirp
Fse.mkdirp(Path.dirname(finalPath), w(function (e /*, path */) {
if (e) { // does not throw error if the directory already existed
w.abort();
return void cb(e.code);
}
}));
Fse.mkdirp(Path.dirname(finalOwnPath), w(function (e /*, path */) {
if (e) { // does not throw error if the directory already existed
w.abort();
return void cb(e.code);
}
}));
}).nThen(function (w) {
// make sure the id does not collide with another
tryId(finalPath, w(function (e) {
if (e) {
w.abort();
return void cb(e);
}
}));
}).nThen(function (w) {
// Create the empty file proving ownership
Fs.writeFile(finalOwnPath, '', w(function (e) {
if (e) {
w.abort();
return void cb(e.code);
}
// otherwise it worked...
}));
}).nThen(function (w) {
// move the existing file to its new path
Fse.move(oldPath, finalPath, w(function (e) {
if (e) {
// if there's an error putting the file into its final location...
// ... you should remove the ownership file
Fs.unlink(finalOwnPath, function () {
// but if you can't, it's not catastrophic
// we can clean it up later
});
w.abort();
return void cb(e.code);
}
// otherwise it worked...
}));
}).nThen(function () {
// clean up their session when you're done
// call back with the blob id...
cb(void 0, id);
});
};
// removeBlob
var remove = function (Env, blobId, cb) {
var blobPath = makeBlobPath(Env, blobId);
Fs.unlink(blobPath, cb); // TODO COLDSTORAGE
};
// removeProof
var removeProof = function (Env, safeKey, blobId, cb) {
var proofPath = makeProofPath(Env, safeKey, blobId);
Fs.unlink(proofPath, cb);
};
// isOwnedBy(id, safeKey)
var isOwnedBy = function (Env, safeKey, blobId, cb) {
var proofPath = makeProofPath(Env, safeKey, blobId);
isFile(proofPath, cb);
};
// archiveBlob
var archiveBlob = function (Env, blobId, cb) {
var blobPath = makeBlobPath(Env, blobId);
var archivePath = prependArchive(Env, blobPath);
Fse.move(blobPath, archivePath, { overwrite: true }, cb);
};
var removeArchivedBlob = function (Env, blobId, cb) {
var archivePath = prependArchive(Env, makeBlobPath(Env, blobId));
Fs.unlink(archivePath, cb);
};
// restoreBlob
var restoreBlob = function (Env, blobId, cb) {
var blobPath = makeBlobPath(Env, blobId);
var archivePath = prependArchive(Env, blobPath);
Fse.move(archivePath, blobPath, cb);
};
// archiveProof
var archiveProof = function (Env, safeKey, blobId, cb) {
var proofPath = makeProofPath(Env, safeKey, blobId);
var archivePath = prependArchive(Env, proofPath);
Fse.move(proofPath, archivePath, { overwrite: true }, cb);
};
var removeArchivedProof = function (Env, safeKey, blobId, cb) {
var archivedPath = prependArchive(Env, makeProofPath(Env, safeKey, blobId));
Fs.unlink(archivedPath, cb);
};
// restoreProof
var restoreProof = function (Env, safeKey, blobId, cb) {
var proofPath = makeProofPath(Env, safeKey, blobId);
var archivePath = prependArchive(Env, proofPath);
Fse.move(archivePath, proofPath, cb);
};
var makeWalker = function (n, handleChild, done) {
if (!n || typeof(n) !== 'number' || n < 2) { n = 2; }
var W;
nThen(function (w) {
// this asynchronous bit defers the completion of this block until
// synchronous execution has completed. This means you must create
// the walker and start using it synchronously or else it will call back
// prematurely
setTimeout(w());
W = w;
}).nThen(function () {
done();
});
// do no more than n jobs at a time (the callers below pass 20)
var tasks = Semaphore.create(n);
var recurse = function (path) {
tasks.take(function (give) {
var next = give(W());
nThen(function (w) {
// check if the path is a directory...
Fs.stat(path, w(function (err, stats) {
if (err) { return next(); }
if (!stats.isDirectory()) {
w.abort();
return void handleChild(void 0, path, next);
}
// fall through
}));
}).nThen(function () {
// handle directories
Fs.readdir(path, function (err, dir) {
if (err) { return next(); }
// everything is fine and it's a directory...
dir.forEach(function (d) {
recurse(Path.join(path, d));
});
next();
});
});
});
};
return recurse;
};
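// Note (added comment): the callers below (listProofs, listBlobs) follow the
// constraint described above -- they create the walker and call walk()
// synchronously inside their readdir callbacks, so the deferred completion
// only fires once every recursive job has been scheduled on the semaphore.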
var listProofs = function (root, handler, cb) {
Fs.readdir(root, function (err, dir) {
if (err) { return void cb(err); }
var walk = makeWalker(20, function (err, path, next) {
// path is the path to a child node on the filesystem
// next handles the next job in a queue
// iterate over proofs
// check for presence of corresponding files
Fs.stat(path, function (err, stats) {
if (err) {
return void handler(err, void 0, next);
}
var parsed = parseProofPath(path);
handler(void 0, {
path: path,
blobId: parsed.blobId,
safeKey: parsed.safeKey,
atime: stats.atime,
ctime: stats.ctime,
mtime: stats.mtime,
}, next);
});
}, function () {
// called when there are no more directories or children to process
cb();
});
dir.forEach(function (d) {
// ignore directories that aren't 3 characters long...
if (d.length !== 3) { return; }
walk(Path.join(root, d));
});
});
};
var listBlobs = function (root, handler, cb) {
// iterate over files
Fs.readdir(root, function (err, dir) {
if (err) { return void cb(err); }
var walk = makeWalker(20, function (err, path, next) {
Fs.stat(path, function (err, stats) {
if (err) {
return void handler(err, void 0, next);
}
handler(void 0, {
blobId: Path.basename(path),
atime: stats.atime,
ctime: stats.ctime,
mtime: stats.mtime,
}, next);
});
}, function () {
cb();
});
dir.forEach(function (d) {
if (d.length !== 2) { return; }
walk(Path.join(root, d));
});
});
};
BlobStore.create = function (config, _cb) {
var cb = Util.once(Util.mkAsync(_cb));
if (typeof(config.getSession) !== 'function') {
return void cb("getSession method required");
}
var Env = {
blobPath: config.blobPath || './blob',
blobStagingPath: config.blobStagingPath || './blobstage',
archivePath: config.archivePath || './data/archive',
getSession: config.getSession,
};
nThen(function (w) {
var CB = Util.both(w.abort, cb);
Fse.mkdirp(Env.blobPath, w(function (e) {
if (e) { CB(e); }
}));
Fse.mkdirp(Env.blobStagingPath, w(function (e) {
if (e) { CB(e); }
}));
Fse.mkdirp(Path.join(Env.archivePath, Env.blobPath), w(function (e) {
if (e) { CB(e); }
}));
}).nThen(function () {
var methods = {
isFileId: isValidId,
status: function (safeKey, _cb) {
// TODO check if the final destination is a file
// because otherwise two people can try to upload to the same location
// and one will fail, invalidating their hard work
var cb = Util.once(Util.mkAsync(_cb));
if (!isValidSafeKey(safeKey)) { return void cb('INVALID_SAFEKEY'); }
isFile(makeStagePath(Env, safeKey), cb);
},
upload: function (safeKey, content, _cb) {
var cb = Util.once(Util.mkAsync(_cb));
if (!isValidSafeKey(safeKey)) { return void cb('INVALID_SAFEKEY'); }
upload(Env, safeKey, content, cb);
},
cancel: function (safeKey, fileSize, _cb) {
var cb = Util.once(Util.mkAsync(_cb));
if (!isValidSafeKey(safeKey)) { return void cb('INVALID_SAFEKEY'); }
if (typeof(fileSize) !== 'number' || isNaN(fileSize) || fileSize <= 0) { return void cb("INVALID_FILESIZE"); }
upload_cancel(Env, safeKey, fileSize, cb);
},
isOwnedBy: function (safeKey, blobId, _cb) {
var cb = Util.once(Util.mkAsync(_cb));
if (!isValidSafeKey(safeKey)) { return void cb('INVALID_SAFEKEY'); }
isOwnedBy(Env, safeKey, blobId, cb);
},
remove: {
blob: function (blobId, _cb) {
var cb = Util.once(Util.mkAsync(_cb));
if (!isValidId(blobId)) { return void cb("INVALID_ID"); }
remove(Env, blobId, cb);
},
proof: function (safeKey, blobId, _cb) {
var cb = Util.once(Util.mkAsync(_cb));
if (!isValidSafeKey(safeKey)) { return void cb('INVALID_SAFEKEY'); }
if (!isValidId(blobId)) { return void cb("INVALID_ID"); }
removeProof(Env, safeKey, blobId, cb);
},
archived: {
blob: function (blobId, _cb) {
var cb = Util.once(Util.mkAsync(_cb));
if (!isValidId(blobId)) { return void cb("INVALID_ID"); }
removeArchivedBlob(Env, blobId, cb);
},
proof: function (safeKey, blobId, _cb) {
var cb = Util.once(Util.mkAsync(_cb));
if (!isValidSafeKey(safeKey)) { return void cb('INVALID_SAFEKEY'); }
if (!isValidId(blobId)) { return void cb("INVALID_ID"); }
removeArchivedProof(Env, safeKey, blobId, cb);
},
},
},
archive: {
blob: function (blobId, _cb) {
var cb = Util.once(Util.mkAsync(_cb));
if (!isValidId(blobId)) { return void cb("INVALID_ID"); }
archiveBlob(Env, blobId, cb);
},
proof: function (safeKey, blobId, _cb) {
var cb = Util.once(Util.mkAsync(_cb));
if (!isValidSafeKey(safeKey)) { return void cb('INVALID_SAFEKEY'); }
if (!isValidId(blobId)) { return void cb("INVALID_ID"); }
archiveProof(Env, safeKey, blobId, cb);
},
},
restore: {
blob: function (blobId, _cb) {
var cb = Util.once(Util.mkAsync(_cb));
if (!isValidId(blobId)) { return void cb("INVALID_ID"); }
restoreBlob(Env, blobId, cb);
},
proof: function (safeKey, blobId, _cb) {
var cb = Util.once(Util.mkAsync(_cb));
if (!isValidSafeKey(safeKey)) { return void cb('INVALID_SAFEKEY'); }
if (!isValidId(blobId)) { return void cb("INVALID_ID"); }
restoreProof(Env, safeKey, blobId, cb);
},
},
complete: function (safeKey, id, _cb) {
var cb = Util.once(Util.mkAsync(_cb));
if (!isValidSafeKey(safeKey)) { return void cb('INVALID_SAFEKEY'); }
if (!isValidId(id)) { return void cb("INVALID_ID"); }
upload_complete(Env, safeKey, id, cb);
},
completeOwned: function (safeKey, id, _cb) {
var cb = Util.once(Util.mkAsync(_cb));
if (!isValidSafeKey(safeKey)) { return void cb('INVALID_SAFEKEY'); }
if (!isValidId(id)) { return void cb("INVALID_ID"); }
owned_upload_complete(Env, safeKey, id, cb);
},
size: function (id, _cb) {
var cb = Util.once(Util.mkAsync(_cb));
if (!isValidId(id)) { return void cb("INVALID_ID"); }
getUploadSize(Env, id, cb);
},
list: {
blobs: function (handler, _cb) {
var cb = Util.once(Util.mkAsync(_cb));
listBlobs(Env.blobPath, handler, cb);
},
proofs: function (handler, _cb) {
var cb = Util.once(Util.mkAsync(_cb));
listProofs(Env.blobPath, handler, cb);
},
archived: {
proofs: function (handler, _cb) {
var cb = Util.once(Util.mkAsync(_cb));
listProofs(prependArchive(Env, Env.blobPath), handler, cb);
},
blobs: function (handler, _cb) {
var cb = Util.once(Util.mkAsync(_cb));
listBlobs(prependArchive(Env, Env.blobPath), handler, cb);
},
}
},
};
cb(void 0, methods);
});
};
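A minimal usage sketch of the exported API (added for illustration; the session bookkeeping and the blob id are stand-ins, and real callers also initialize currentUploadSize/pendingUploadSize before uploading):

var BlobStore = require('./storage/blob');
var sessions = {};
BlobStore.create({
    blobPath: './blob',
    blobStagingPath: './blobstage',
    archivePath: './data/archive',
    getSession: function (safeKey) {
        return (sessions[safeKey] = sessions[safeKey] || {});
    },
}, function (err, store) {
    if (err) { return void console.error(err); }
    // look up the size of an uploaded blob (48-character hex id, invented here)
    store.size('0123456789abcdef0123456789abcdef0123456789abcdef', function (err, bytes) {
        console.log(err, bytes);
    });
});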

1053
storage/file.js
File diff suppressed because it is too large

413
storage/tasks.js

@ -1,413 +0,0 @@
var Fs = require("fs");
var Fse = require("fs-extra");
var Path = require("path");
var nacl = require("tweetnacl/nacl-fast");
var nThen = require("nthen");
var Tasks = module.exports;
var tryParse = function (s) {
try { return JSON.parse(s); }
catch (e) { return null; }
};
var encode = function (time, command, args) {
if (typeof(time) !== 'number') { return null; }
if (typeof(command) !== 'string') { return null; }
if (!Array.isArray(args)) { return [time, command]; }
return [time, command].concat(args);
};
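// Illustration (added comment): encode(1585267200000, 'EXPIRE', ['channelId'])
// returns [1585267200000, 'EXPIRE', 'channelId']; `run` below splits this back
// into time (task[0]), command (task[1]) and args (task.slice(2)).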
/*
var randomId = function () {
var bytes = Array.prototype.slice.call(nacl.randomBytes(16));
return bytes.map(function (b) {
var n = Number(b & 0xff).toString(16);
return n.length === 1? '0' + n: n;
}).join('');
};
var mkPath = function (env, id) {
return Path.join(env.root, id.slice(0, 2), id) + '.ndjson';
};
*/
// make a new folder every MODULUS ms
var MODULUS = 1000 * 60 * 60 * 24; // one day
var moduloTime = function (d) {
return d - (d % MODULUS);
};
var makeDirectoryId = function (d) {
return '' + moduloTime(d);
};
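// Illustration (added comment; timestamps invented): with MODULUS of one day,
// moduloTime(1585312200000) === 1585267200000, so every task expiring within
// the same UTC day is written under the directory named '1585267200000'.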
var write = function (env, task, cb) {
var str = JSON.stringify(task) + '\n';
var id = nacl.util.encodeBase64(nacl.hash(nacl.util.decodeUTF8(str))).replace(/\//g, '-');
var dir = makeDirectoryId(task[0]);
var path = Path.join(env.root, dir);
nThen(function (w) {
// create the parent directory if it does not exist
Fse.mkdirp(path, 0x1ff, w(function (err) {
if (err) {
w.abort();
return void cb(err);
}
}));
}).nThen(function () {
// write the file to the path
var fullPath = Path.join(path, id + '.ndjson');
// the file ids are based on the hash of the file contents to be written
// as such, writing an exact task a second time will overwrite the first with the same contents
// this shouldn't be a problem
Fs.writeFile(fullPath, str, function (e) {
if (e) {
env.log.error("TASK_WRITE_FAILURE", {
error: e,
path: fullPath,
});
return void cb(e);
}
env.log.info("SUCCESSFUL_WRITE", {
path: fullPath,
});
cb();
});
});
};
var remove = function (env, path, cb) {
// FIXME COLDSTORAGE?
Fs.unlink(path, cb);
};
var removeDirectory = function (env, path, cb) {
Fs.rmdir(path, cb);
};
var list = Tasks.list = function (env, cb, migration) {
var rootDirs;
nThen(function (w) {
// read the root directory
Fs.readdir(env.root, w(function (e, list) {
if (e) {
env.log.error("TASK_ROOT_DIR", {
root: env.root,
error: e,
});
return void cb(e);
}
if (list.length === 0) {
w.abort();
return void cb(void 0, []);
}
rootDirs = list;
}));
}).nThen(function () {
// schedule the nested directories for exploration
// return a list of paths to tasks
var queue = nThen(function () {});
var allPaths = [];
var currentWindow = moduloTime(+new Date() + MODULUS);
// We prioritize a small footprint over speed, so we
// iterate over directories in serial rather than parallel
rootDirs.forEach(function (dir) {
// if a directory is two characters, it's the old format
// otherwise, it indicates when the file is set to expire
// so we can ignore directories which are clearly in the future
var dirTime;
if (migration) {
// this block handles migrations. ignore new formats
if (dir.length !== 2) {
return;
}
} else {
// not in migration mode, check if it's a new format
if (dir.length >= 2) {
// might be the new format.
// check its time to see if it should be skipped
dirTime = parseInt(dir);
if (!isNaN(dirTime) && dirTime >= currentWindow) {
return;
}
}
}
queue.nThen(function (w) {
var subPath = Path.join(env.root, dir);
Fs.readdir(subPath, w(function (e, paths) {
if (e) {
env.log.error("TASKS_INVALID_SUBDIR", {
path: subPath,
error: e,
});
return;
}
if (paths.length === 0) {
removeDirectory(env, subPath, function (err) {
if (err) {
env.log.error('TASKS_REMOVE_EMPTY_DIRECTORY', {
error: err,
path: subPath,
});
}
});
}
// concat in place
Array.prototype.push.apply(allPaths, paths.map(function (p) {
return Path.join(subPath, p);
}));
}));
});
});
queue.nThen(function () {
cb(void 0, allPaths);
});
});
};
var read = function (env, filePath, cb) {
Fs.readFile(filePath, 'utf8', function (e, str) {
if (e) { return void cb(e); }
var task = tryParse(str);
if (!Array.isArray(task) || task.length < 2) {
env.log.error("INVALID_TASK", {
path: filePath,
task: task,
});
return cb(new Error('INVALID_TASK'));
}
cb(void 0, task);
});
};
var expire = function (env, task, cb) {
// TODO magic numbers, maybe turn task parsing into a function
// and also maybe just encode tasks in a better format to start...
var Log = env.log;
var args = task.slice(2);
if (!env.retainData) {
Log.info('DELETION_SCHEDULED_EXPIRATION', {
task: task,
});
env.store.removeChannel(args[0], function (err) {
if (err) {
Log.error('DELETION_SCHEDULED_EXPIRATION_ERROR', {
task: task,
error: err,
});
}
cb();
});
return;
}
Log.info('ARCHIVAL_SCHEDULED_EXPIRATION', {
task: task,
});
env.store.archiveChannel(args[0], function (err) {
if (err) {
Log.error('ARCHIVE_SCHEDULED_EXPIRATION_ERROR', {
task: task,
error: err,
});
}
cb();
});
};
var run = Tasks.run = function (env, path, cb) {
var CURRENT = +new Date();
var Log = env.log;
var task, time, command, args;
nThen(function (w) {
read(env, path, w(function (err, _task) {
if (err) {
w.abort();
// there was a file but it wasn't valid?
return void cb(err);
}
task = _task;
time = task[0];
if (time > CURRENT) {
w.abort();
return cb();
}
command = task[1];
args = task.slice(2);
}));
}).nThen(function (w) {
switch (command) {
case 'EXPIRE':
return void expire(env, task, w());
default:
Log.warn("TASKS_UNKNOWN_COMMAND", task);
}
}).nThen(function () {
// remove the task file...
remove(env, path, function (err) {
if (err) {
Log.error('TASKS_RECORD_REMOVAL', {
path: path,
err: err,
});
}
cb();
});
});
};
var runAll = function (env, cb) {
// check if already running and bail out if so
if (env.running) {
return void cb("TASK_CONCURRENCY");
}
// if not, set a flag to block concurrency and proceed
env.running = true;
var paths;
nThen(function (w) {
list(env, w(function (err, _paths) {
if (err) {
w.abort();
env.running = false;
return void cb(err);
}
paths = _paths;
}));
}).nThen(function (w) {
var done = w();
var nt = nThen(function () {});
paths.forEach(function (path) {
nt = nt.nThen(function (w) {
run(env, path, w(function (err) {
if (err) {
// Any errors are already logged in 'run'
// the admin will need to review the logs and clean up
}
}));
});
});
nt = nt.nThen(function () {
done();
});
}).nThen(function (/*w*/) {
env.running = false;
cb();
});
};
var migrate = function (env, cb) {
// list every task
list(env, function (err, paths) {
if (err) {
return void cb(err);
}
var nt = nThen(function () {});
paths.forEach(function (path) {
var bypass;
var task;
nt = nt.nThen(function (w) {
// read
read(env, path, w(function (err, _task) {
if (err) {
bypass = true;
env.log.error("TASK_MIGRATION_READ", {
error: err,
path: path,
});
return;
}
task = _task;
}));
}).nThen(function (w) {
if (bypass) { return; }
// rewrite in new format
write(env, task, w(function (err) {
if (err) {
bypass = true;
env.log.error("TASK_MIGRATION_WRITE", {
error: err,
task: task,
});
}
}));
}).nThen(function (w) {
if (bypass) { return; }
// remove
remove(env, path, w(function (err) {
if (err) {
env.log.error("TASK_MIGRATION_REMOVE", {
error: err,
path: path,
});
}
}));
});
});
nt = nt.nThen(function () {
cb();
});
}, true);
};
Tasks.create = function (config, cb) {
if (!config.store) { throw new Error("E_STORE_REQUIRED"); }
if (!config.log) { throw new Error("E_LOG_REQUIRED"); }
var env = {
root: config.taskPath || './tasks',
log: config.log,
store: config.store,
retainData: Boolean(config.retainData),
};
// make sure the path exists...
Fse.mkdirp(env.root, 0x1ff, function (err) {
if (err) { return void cb(err); }
cb(void 0, {
write: function (time, command, args, cb) {
var task = encode(time, command, args);
write(env, task, cb);
},
list: function (olderThan, cb) {
list(env, olderThan, cb);
},
remove: function (id, cb) {
remove(env, id, cb);
},
run: function (id, cb) {
run(env, id, cb);
},
runAll: function (cb) {
runAll(env, cb);
},
migrate: function (cb) {
migrate(env, cb);
},
});
});
};

24
www/admin/app-admin.less

@ -23,5 +23,29 @@
display: flex;
flex-flow: column;
}
.cp-support-list-actions {
margin: 10px 0px 10px 2px;
}
.cp-support-list-ticket:not(.cp-support-list-closed) {
.cp-support-list-message {
&:last-child:not(.cp-support-fromadmin) {
color: @colortheme_cp-red;
background-color: lighten(@colortheme_cp-red, 25%);
.cp-support-showdata {
background-color: lighten(@colortheme_cp-red, 30%);
}
}
}
}
.cp-support-fromadmin {
color: @colortheme_logo-2;
background-color: #FFF;
.cp-support-message-content {
color: @colortheme_logo-2;
}
}
}

2
www/admin/index.html

@ -6,7 +6,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="referrer" content="no-referrer" />
<script async data-bootload="main.js" data-main="/common/boot.js?ver=1.0" src="/bower_components/requirejs/require.js?ver=2.3.5"></script>
<link href="/customize/src/outer.css" rel="stylesheet" type="text/css">
<link href="/customize/src/outer.css?ver=1.1" rel="stylesheet" type="text/css">
</head>
<body>
<iframe id="sbox-iframe">

29
www/admin/inner.js

@ -43,6 +43,7 @@ define([
'stats': [
'cp-admin-active-sessions',
'cp-admin-active-pads',
'cp-admin-open-files',
'cp-admin-registered',
'cp-admin-disk-usage',
],
@ -119,6 +120,17 @@ define([
});
return $div;
};
create['open-files'] = function () {
var key = 'open-files';
var $div = makeBlock(key);
sFrameChan.query('Q_ADMIN_RPC', {
cmd: 'GET_FILE_DESCRIPTOR_COUNT',
}, function (e, data) {
console.log(e, data);
$div.append(h('pre', String(data)));
});
return $div;
};
create['registered'] = function () {
var key = 'registered';
var $div = makeBlock(key);
@ -172,6 +184,12 @@ define([
if (!supportKey || !APP.privateKey) { return; }
var $container = makeBlock('support-list');
var $div = $(h('div.cp-support-container')).appendTo($container);
var metadataMgr = common.getMetadataMgr();
var privateData = metadataMgr.getPrivateData();
var cat = privateData.category || '';
var linkedId = cat.indexOf('-') !== -1 && cat.slice(8);
var hashesById = {};
var reorder = function () {
@ -200,6 +218,12 @@ define([
});
};
var to = Util.throttle(function () {
var $ticket = $div.find('.cp-support-list-ticket[data-id="'+linkedId+'"]');
$ticket[0].scrollIntoView();
linkedId = undefined;
}, 100);
// Register to the "support" mailbox
common.mailbox.subscribe(['supportadmin'], {
onMessage: function (data) {
@ -246,6 +270,8 @@ define([
}
$ticket.append(APP.support.makeMessage(content, hash));
reorder();
if (linkedId) { to(); }
}
});
return $container;
@ -312,6 +338,9 @@ define([
var metadataMgr = common.getMetadataMgr();
var privateData = metadataMgr.getPrivateData();
var active = privateData.category || 'general';
if (active.indexOf('-') !== -1) {
active = active.split('-')[0];
}
common.setHash(active);
Object.keys(categories).forEach(function (key) {
var $category = $('<div>', {'class': 'cp-sidebarlayout-category'}).appendTo($categories);

42
www/assert/main.js

@ -254,12 +254,46 @@ define([
!secret.hashData.present);
}, "test support for trailing slashes in version 1 hash failed to parse");
// test support for ownerKey
assert(function (cb) {
var secret = Hash.parsePadUrl('/invite/#/1/ilrOtygzDVoUSRpOOJrUuQ/e8jvf36S3chzkkcaMrLSW7PPrz7VDp85lIFNI26dTmr=/');
var secret = Hash.parsePadUrl('/pad/#/1/edit/3Ujt4F2Sjnjbis6CoYWpoQ/usn4+9CqVja8Q7RZOGTfRgqI/present/uPmJDtDJ9okhdIyQ-8zphYlpaAonJDOC6MAcYY6iBwWBQr+XmrQ9uGY9WkApJTfEfAu5QcqaDCw1Ul+JXKcYkA/embed');
return cb(secret.hashData.version === 1 &&
secret.hashData.mode === "edit" &&
secret.hashData.channel === "3Ujt4F2Sjnjbis6CoYWpoQ" &&
secret.hashData.key === "usn4+9CqVja8Q7RZOGTfRgqI" &&
secret.hashData.ownerKey === "uPmJDtDJ9okhdIyQ-8zphYlpaAonJDOC6MAcYY6iBwWBQr+XmrQ9uGY9WkApJTfEfAu5QcqaDCw1Ul+JXKcYkA" &&
secret.hashData.embed &&
secret.hashData.present);
}, "test support for owner key in version 1 hash failed to parse");
assert(function (cb) {
var parsed = Hash.parsePadUrl('/pad/#/2/pad/edit/oRE0oLCtEXusRDyin7GyLGcS/p/uPmJDtDJ9okhdIyQ-8zphYlpaAonJDOC6MAcYY6iBwWBQr+XmrQ9uGY9WkApJTfEfAu5QcqaDCw1Ul+JXKcYkA/embed');
var secret = Hash.getSecrets('pad', parsed.hash);
return cb(parsed.hashData.version === 2 &&
parsed.hashData.mode === "edit" &&
parsed.hashData.type === "pad" &&
parsed.hashData.key === "oRE0oLCtEXusRDyin7GyLGcS" &&
secret.channel === "d8d51b4aea863f3f050f47f8ad261753" &&
window.nacl.util.encodeBase64(secret.keys.cryptKey) === "0Ts1M6VVEozErV2Nx/LTv6Im5SCD7io2LlhasyyBPQo=" &&
secret.keys.validateKey === "f5A1FM9Gp55tnOcM75RyHD1oxBG9ZPh9WDA7qe2Fvps=" &&
parsed.hashData.ownerKey === "uPmJDtDJ9okhdIyQ-8zphYlpaAonJDOC6MAcYY6iBwWBQr+XmrQ9uGY9WkApJTfEfAu5QcqaDCw1Ul+JXKcYkA" &&
parsed.hashData.embed &&
parsed.hashData.password);
}, "test support for owner key in version 2 hash failed to parse");
assert(function (cb) {
var secret = Hash.parsePadUrl('/file/#/1/TRplGM-WsVkXR+LkJ0tD3D45A1YFZ-Cy/eO4RJwh8yHEEDhl1aHfuwQ2IzosPBZx-HDaWc1lW+hY=/uPmJDtDJ9okhdIyQ-8zphYlpaAonJDOC6MAcYY6iBwWBQr+XmrQ9uGY9WkApJTfEfAu5QcqaDCw1Ul+JXKcYkA/');
return cb(secret.hashData.version === 1 &&
secret.hashData.channel === "TRplGM/WsVkXR+LkJ0tD3D45A1YFZ/Cy" &&
secret.hashData.key === "eO4RJwh8yHEEDhl1aHfuwQ2IzosPBZx/HDaWc1lW+hY=" &&
secret.hashData.ownerKey === "uPmJDtDJ9okhdIyQ-8zphYlpaAonJDOC6MAcYY6iBwWBQr+XmrQ9uGY9WkApJTfEfAu5QcqaDCw1Ul+JXKcYkA" &&
!secret.hashData.present);
}, "test support for owner key in version 1 file hash failed to parse");
assert(function (cb) {
var secret = Hash.parsePadUrl('/invite/#/2/invite/edit/oRE0oLCtEXusRDyin7GyLGcS/p/');
var hd = secret.hashData;
cb(hd.channel === "ilrOtygzDVoUSRpOOJrUuQ" &&
hd.pubkey === "e8jvf36S3chzkkcaMrLSW7PPrz7VDp85lIFNI26dTmr=" &&
hd.type === 'invite');
cb(hd.key === "oRE0oLCtEXusRDyin7GyLGcS" &&
hd.password &&
hd.app === 'invite');
}, "test support for invite urls");
// test support for V2

41
www/auth/main.js

@ -1,8 +1,10 @@
define([
'jquery',
'/api/config',
'/common/cryptget.js',
'/common/pinpad.js',
'/common/common-constants.js',
'/common/common-hash.js',
'/common/outer/local-store.js',
'/common/outer/login-block.js',
'/common/outer/network-config.js',
@ -11,7 +13,7 @@ define([
'/bower_components/nthen/index.js',
'/bower_components/netflux-websocket/netflux-client.js',
'/bower_components/tweetnacl/nacl-fast.min.js'
], function ($, Crypt, Pinpad, Constants, LocalStore, Block, NetConfig, Login, Test, nThen, Netflux) {
], function ($, ApiConfig, Crypt, Pinpad, Constants, Hash, LocalStore, Block, NetConfig, Login, Test, nThen, Netflux) {
var Nacl = window.nacl;
var signMsg = function (msg, privKey) {
@ -35,6 +37,7 @@ define([
var rpc;
var network;
var rpcError;
var contacts = {};
var loadProxy = function (hash) {
nThen(function (waitFor) {
@ -63,6 +66,41 @@ define([
}), {
network: network
});
}).nThen(function () {
var origin = ApiConfig.fileHost || window.location.origin;
// Get contacts and extract their avatar channel and key
var getData = function (obj, href) {
var parsed = Hash.parsePadUrl(href);
if (!parsed || parsed.type !== "file") { return; }
var secret = Hash.getSecrets('file', parsed.hash);
if (!secret.keys || !secret.channel) { return; }
obj.avatarKey = Hash.encodeBase64(secret.keys && secret.keys.cryptKey);
obj.avatarSrc = origin + Hash.getBlobPathFromHex(secret.channel);
};
contacts.teams = proxy.teams || {};
contacts.friends = proxy.friends || {};
Object.keys(contacts.friends).map(function (key) {
var friend = contacts.friends[key];
if (!friend) { return; }
var ret = {
edPublic: friend.edPublic,
name: friend.displayName,
};
getData(ret, friend.avatar);
contacts.friends[key] = ret;
});
Object.keys(contacts.teams).map(function (key) {
var team = contacts.teams[key];
if (!team) { return; }
var avatar = team.metadata && team.metadata.avatar;
var ret = {
edPublic: team.keys && team.keys.drive && team.keys.drive.edPublic,
name: team.metadata && team.metadata.name
};
getData(ret, avatar);
contacts.teams[key] = ret;
});
contacts.origin = window.location.origin;
}).nThen(function (waitFor) {
if (!network) { return void waitFor.abort(); }
Pinpad.create(network, proxy, waitFor(function (e, call) {
@ -122,6 +160,7 @@ define([
edPublic: proxy.edPublic,
sig: sig
};
ret.contacts = contacts;
srcWindow.postMessage(JSON.stringify(ret), domain);
});
}

33
www/code/app-code.less

@ -65,47 +65,20 @@
box-sizing: border-box;
//font-family: Calibri,Ubuntu,sans-serif;
font: @colortheme_app-font;
word-wrap: break-word;
position: relative;
flex: 1;
h1, h2, h3, h4, h5, h6 {
font-weight: bold;
padding-bottom: 0.3em;
border-bottom: 1px solid #eee;
}
li {
min-height: 22px;
}
.markdown_main();
.markdown_cryptpad();
.todo-list-item {
list-style: none;
.fa {
position: absolute;
margin-left: -17px;
margin-top: 4px;
&.fa-check-square {
font-size: 15px;
margin-top: 5px;
}
}
}
media-tag {
* {
max-width:100%;
}
iframe[src$=".pdf"] {
width: 100%;
height: 80vh;
max-height: 90vh;
}
}
media-tag:empty {
width: 100px;
height: 100px;
display: inline-block;
border: 1px solid #BBB;
}
.markdown_main();
.cp-app-code-preview-empty {
display: none;
}

2
www/code/index.html

@ -6,7 +6,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="referrer" content="no-referrer" />
<script async data-bootload="/common/sframe-app-outer.js" data-main="/common/boot.js?ver=1.0" src="/bower_components/requirejs/require.js?ver=2.3.5"></script>
<link href="/customize/src/outer.css" rel="stylesheet" type="text/css">
<link href="/customize/src/outer.css?ver=1.1" rel="stylesheet" type="text/css">
</head>
<body>
<iframe id="sbox-iframe">

117
www/code/inner.js

@ -3,16 +3,20 @@ define([
'/common/diffMarked.js',
'/bower_components/nthen/index.js',
'/common/sframe-common.js',
'/common/hyperscript.js',
'/common/sframe-app-framework.js',
'/common/sframe-common-codemirror.js',
'/common/common-interface.js',
'/common/common-util.js',
'/common/common-hash.js',
'/code/markers.js',
'/common/modes.js',
'/common/visible.js',
'/common/TypingTests.js',
'/customize/messages.js',
'cm/lib/codemirror',
'css!cm/lib/codemirror.css',
'css!cm/addon/dialog/dialog.css',
'css!cm/addon/fold/foldgutter.css',
@ -46,10 +50,13 @@ define([
DiffMd,
nThen,
SFCommon,
h,
Framework,
SFCodeMirror,
UI,
Util,
Hash,
Markers,
Modes,
Visible,
TypingTest,
@ -98,6 +105,7 @@ define([
};
var mkHelpMenu = function (framework) {
var $codeMirrorContainer = $('#cp-app-code-container');
$codeMirrorContainer.prepend(framework._.sfCommon.getBurnAfterReadingWarning());
var helpMenu = framework._.sfCommon.createHelpMenu(['text', 'code']);
$codeMirrorContainer.prepend(helpMenu.menu);
@ -169,6 +177,14 @@ define([
e.preventDefault();
var $a = $t.is('a') ? $t : $t.parents('a').first();
var href = $a.attr('href');
if (/^\/[^\/]/.test(href)) {
var privateData = framework._.cpNfInner.metadataMgr.getPrivateData();
href = privateData.origin + href;
} else if (/^#/.test(href)) {
var target = document.getElementById('cp-md-0-'+href.slice(1));
if (target) { target.scrollIntoView(); }
return;
}
framework._.sfCommon.openUnsafeURL(href);
}
});
@ -264,11 +280,60 @@ define([
};
};
var mkColorByAuthor = function (framework, markers) {
var common = framework._.sfCommon;
var $cbaButton = framework._.sfCommon.createButton(null, true, {
icon: 'fa-paint-brush',
text: Messages.cba_title,
name: 'cba'
}, function () {
var div = h('div');
var $div = $(div);
var content = h('div', [
h('h4', Messages.cba_properties),
h('p', Messages.cba_hint),
div
]);
var setButton = function (state) {
var button = h('button.btn');
var $button = $(button);
$div.html('').append($button);
if (state) {
// Add "enable" button
$button.addClass('btn-secondary').text(Messages.cba_enable);
UI.confirmButton(button, {
classes: 'btn-primary'
}, function () {
$button.remove();
markers.setState(true);
common.setAttribute(['code', 'enableColors'], true);
setButton(false);
});
return;
}
// Add "disable" button
$button.addClass('btn-danger-alt').text(Messages.cba_disable);
UI.confirmButton(button, {
classes: 'btn-danger'
}, function () {
$button.remove();
markers.setState(false);
common.setAttribute(['code', 'enableColors'], false);
setButton(true);
});
};
setButton(!markers.getState());
UI.alert(content);
});
framework._.toolbar.$drawer.append($cbaButton);
};
var mkFilePicker = function (framework, editor, evModeChange) {
evModeChange.reg(function (mode) {
if (MEDIA_TAG_MODES.indexOf(mode) !== -1) {
// Embedding is enabled
framework.setMediaTagEmbedder(function (mt) {
editor.focus();
editor.replaceSelection($(mt)[0].outerHTML);
});
} else {
@ -291,6 +356,20 @@ define([
var previewPane = mkPreviewPane(editor, CodeMirror, framework, isPresentMode);
var markdownTb = mkMarkdownTb(editor, framework);
var markers = Markers.create({
common: common,
framework: framework,
CodeMirror: CodeMirror,
devMode: privateData.devMode,
editor: editor
});
var $showAuthorColorsButton = framework._.sfCommon.createButton('', true, {
icon: 'fa-paint-brush',
}).hide();
framework._.toolbar.$rightside.append($showAuthorColorsButton);
markers.setButton($showAuthorColorsButton);
var $print = $('#cp-app-code-print');
var $content = $('#cp-app-code-preview-content');
mkPrintButton(framework, $content, $print);
@ -313,15 +392,23 @@ define([
CodeMirror.configureTheme(common);
}
////
framework.onContentUpdate(function (newContent) {
var highlightMode = newContent.highlightMode;
if (highlightMode && highlightMode !== CodeMirror.highlightMode) {
CodeMirror.setMode(highlightMode, evModeChange.fire);
}
// Fix the markers offsets
markers.checkMarks(newContent);
// Apply the text content
CodeMirror.contentUpdate(newContent);
previewPane.draw();
// Apply the markers
markers.setMarks();
framework.localChange();
});
framework.setContentGetter(function () {
@ -329,6 +416,10 @@ define([
var content = CodeMirror.getContent();
content.highlightMode = CodeMirror.highlightMode;
previewPane.draw();
markers.updateAuthorMarks();
content.authormarks = markers.getAuthorMarks();
return content;
});
@ -358,6 +449,19 @@ define([
//console.log("%s => %s", CodeMirror.highlightMode, CodeMirror.$language.val());
}
markers.ready();
common.getPadMetadata(null, function (md) {
if (md && md.error) { return; }
if (!Array.isArray(md.owners)) { return void markers.setState(false); }
if (!common.isOwned(md.owners)) { return; }
// We're the owner: add the button and enable the colors if needed
mkColorByAuthor(framework, markers);
if (newPad && Util.find(privateData, ['settings', 'code', 'enableColors'])) {
markers.setState(true);
}
});
var fmConfig = {
dropArea: $('.CodeMirror'),
body: $('body'),
@ -375,7 +479,7 @@ define([
});
framework.onDefaultContentNeeded(function () {
editor.setValue(''); //Messages.codeInitialState);
editor.setValue('');
});
framework.setFileExporter(CodeMirror.getContentExtension, CodeMirror.fileExporter);
@ -392,11 +496,14 @@ define([
framework.setNormalizer(function (c) {
return {
content: c.content,
highlightMode: c.highlightMode
highlightMode: c.highlightMode,
authormarks: c.authormarks
};
});
editor.on('change', framework.localChange);
editor.on('change', function( cm, change ) {
markers.localChange(change, framework.localChange);
});
framework.start();

750
www/code/markers.js

@ -0,0 +1,750 @@
define([
'/common/common-util.js',
'/common/sframe-common-codemirror.js',
'/customize/messages.js',
'/bower_components/chainpad/chainpad.dist.js',
], function (Util, SFCodeMirror, Messages, ChainPad) {
var Markers = {};
/* TODO Known Issues
* 1. ChainPad diff is not completely accurate: we're not aware of the other user's cursor
position so if they insert an "a" in the middle of "aaaaa", the diff will think that
the "a" was inserted at the end of this sequence. This is not an issue for the content
but it will cause issues for the colors
2. ChainPad doesn't always produce the correct result in case of conflict (?)
e.g. Alice inserts "pew" at offset 10 while Bob removes 1 character at offset 10.
The expected result is "pew" inserted and the following character deleted.
In some cases, the result is "ew" inserted and the following character not deleted.
*/
var debug = function () {};
var MARK_OPACITY = 0.5;
var DEFAULT = {
authors: {},
marks: [[-1, 0, 0, Number.MAX_SAFE_INTEGER, Number.MAX_SAFE_INTEGER]]
};
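// Note (added comment): the single default mark uses uid -1, which addMark()
// below renders with a transparent background, and it spans the whole document
// (0,0 to MAX_SAFE_INTEGER), so text written before colors were enabled stays uncolored.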
var addMark = function (Env, from, to, uid) {
if (!Env.enabled) { return; }
var author = Env.authormarks.authors[uid] || {};
if (uid === -1) {
return void Env.editor.markText(from, to, {
css: "background-color: transparent",
attributes: {
'data-type': 'authormark',
'data-uid': uid
}
});
}
uid = Number(uid);
var name = Util.fixHTML(author.name || Messages.anonymous);
var col = Util.hexToRGB(author.color);
var rgba = 'rgba('+col[0]+','+col[1]+','+col[2]+','+Env.opacity+');';
return Env.editor.markText(from, to, {
inclusiveLeft: uid === Env.myAuthorId,
inclusiveRight: uid === Env.myAuthorId,
css: "background-color: " + rgba,
attributes: {
title: Env.opacity ? Messages._getKey('cba_writtenBy', [name]) : '',
'data-type': 'authormark',
'data-uid': uid
}
});
};
var sortMarks = function (a, b) {
if (!Array.isArray(b)) { return -1; }
if (!Array.isArray(a)) { return 1; }
// Check line
if (a[1] < b[1]) { return -1; }
if (a[1] > b[1]) { return 1; }
// Same line: check start offset
if (a[2] < b[2]) { return -1; }
if (a[2] > b[2]) { return 1; }
return 0;
};
/* Formats:
[uid, startLine, startCh, endLine, endCh] (multi line)
[uid, startLine, startCh, endCh] (single line)
[uid, startLine, startCh] (single character)
*/
var parseMark = Markers.parseMark = function (array) {
if (!Array.isArray(array)) { return {}; }
var multiline = typeof(array[4]) !== "undefined";
var singleChar = typeof(array[3]) === "undefined";
return {
uid: array[0],
startLine: array[1],
startCh: array[2],
endLine: multiline ? array[3] : array[1],
endCh: singleChar ? (array[2]+1) : (multiline ? array[4] : array[3])
};
};
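// Illustration (added comment; values invented):
//   parseMark([7, 4, 2, 6, 10]) -> { uid: 7, startLine: 4, startCh: 2, endLine: 6, endCh: 10 }
//   parseMark([7, 4, 2, 10])    -> { uid: 7, startLine: 4, startCh: 2, endLine: 4, endCh: 10 }
//   parseMark([7, 4, 2])        -> { uid: 7, startLine: 4, startCh: 2, endLine: 4, endCh: 3 }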
var setAuthorMarks = function (Env, authormarks) {
if (!Env.enabled) {
Env.authormarks = {};
return;
}
authormarks = authormarks || {};
if (!authormarks.marks) { authormarks.marks = Util.clone(DEFAULT.marks); }
if (!authormarks.authors) { authormarks.authors = Util.clone(DEFAULT.authors); }
Env.oldMarks = Env.authormarks;
Env.authormarks = authormarks;
};
var getAuthorMarks = function (Env) {
return Env.authormarks;
};
var updateAuthorMarks = function (Env) {
if (!Env.enabled) { return; }
// get author marks
var _marks = [];
var all = [];
var i = 0;
Env.editor.getAllMarks().forEach(function (mark) {
var pos = mark.find();
var attributes = mark.attributes || {};
if (!pos || attributes['data-type'] !== 'authormark') { return; }
var uid = Number(attributes['data-uid']) || 0;
all.forEach(function (obj) {
if (obj.uid !== uid) { return; }
if (obj.removed) { return; }
// Merge left
if (obj.pos.to.line === pos.from.line && obj.pos.to.ch === pos.from.ch) {
obj.removed = true;
_marks[obj.index] = undefined;
obj.mark.clear();
mark.clear();
mark = addMark(Env, obj.pos.from, pos.to, uid);
pos.from = obj.pos.from;
return;
}
// Merge right
if (obj.pos.from.line === pos.to.line && obj.pos.from.ch === pos.to.ch) {
obj.removed = true;
_marks[obj.index] = undefined;
obj.mark.clear();
mark.clear();
mark = addMark(Env, pos.from, obj.pos.to, uid);
pos.to = obj.pos.to;
}
});
var array = [uid, pos.from.line, pos.from.ch];
if (pos.from.line === pos.to.line && pos.to.ch > (pos.from.ch+1)) {
// If there is more than 1 character, add the "to" character
array.push(pos.to.ch);
} else if (pos.from.line !== pos.to.line) {
// If the mark is on more than one line, add the "to" line data
Array.prototype.push.apply(array, [pos.to.line, pos.to.ch]);
}
_marks.push(array);
all.push({
uid: uid,
pos: pos,
mark: mark,
index: i
});
i++;
});
_marks.sort(sortMarks);
debug('warn', _marks);
Env.authormarks.marks = _marks.filter(Boolean);
};
// Fix all marks located after the given operation in the provided document
var fixMarksFromOp = function (Env, op, marks, doc) {
var pos = SFCodeMirror.posToCursor(op.offset, doc); // pos of start offset
var rPos = SFCodeMirror.posToCursor(op.offset + op.toRemove, doc); // end of removed content
var removed = doc.slice(op.offset, op.offset + op.toRemove).split('\n'); // removed content
var added = op.toInsert.split('\n'); // added content
var posEndLine = pos.line + added.length - 1; // end line after op
var posEndCh = added[added.length - 1].length; // end ch after op
var addLine = added.length - removed.length;
var addCh = added[added.length - 1].length - removed[removed.length - 1].length;
if (addLine > 0) { addCh -= pos.ch; }
else if (addLine < 0) { addCh += pos.ch; }
else { posEndCh += pos.ch; }
var splitted;
marks.forEach(function (mark, i) {
if (!mark) { return; }
var p = parseMark(mark);
// Don't update marks located before the operation
if (p.endLine < pos.line || (p.endLine === pos.line && p.endCh < pos.ch)) { return; }
// Remove markers that have been deleted by my changes
if ((p.startLine > pos.line || (p.startLine === pos.line && p.startCh >= pos.ch)) &&
(p.endLine < rPos.line || (p.endLine === rPos.line && p.endCh <= rPos.ch))) {
marks[i] = undefined;
return;
}
// Update markers that have been cropped right
if (p.endLine < rPos.line || (p.endLine === rPos.line && p.endCh <= rPos.ch)) {
mark[3] = pos.line;
mark[4] = pos.ch;
return;
}
// Update markers that have been cropped left. These markers will be affected by
// my toInsert so don't abort
if (p.startLine < rPos.line || (p.startLine === rPos.line && p.startCh < rPos.ch)) {
// If our change will split an existing mark, put the existing mark after the change
// and create a new mark before
if (p.startLine < pos.line || (p.startLine === pos.line && p.startCh < pos.ch)) {
splitted = [mark[0], mark[1], mark[2], pos.line, pos.ch];
}
mark[1] = rPos.line;
mark[2] = rPos.ch;
}
// Apply my toInsert to the remaining marks
mark[1] += addLine;
if (typeof(mark[4]) !== "undefined") { mark[3] += addLine; }
if (mark[1] === posEndLine) {
mark[2] += addCh;
if (typeof(mark[4]) === "undefined" && typeof(mark[3]) !== "undefined") {
mark[3] += addCh;
} else if (typeof(mark[4]) !== "undefined" && mark[3] === posEndLine) {
mark[4] += addCh;
}
}
});
if (op.toInsert.length) {
marks.push([Env.myAuthorId, pos.line, pos.ch, posEndLine, posEndCh]);
}
if (splitted) {
marks.push(splitted);
}
marks.sort(sortMarks);
};
// Remove marks added by OT and fix the incorrect ones
// first: data about the change with the lowest offset
// last: data about the change with the highest offset
// in the comments, "I" am "first"
var fixMarks = function (Env, first, last, content, toKeepEnd) {
var toKeep = [];
var toJoin = {};
debug('error', "Fix marks");
debug('warn', first);
debug('warn', last);
if (first.me !== last.me) {
// Get their start position compared to the authDoc
var lastAuthOffset = last.offset + last.total;
var lastAuthPos = SFCodeMirror.posToCursor(lastAuthOffset, last.doc);
// Get their start position compared to the localDoc
var lastLocalOffset = last.offset + first.total;
var lastLocalPos = SFCodeMirror.posToCursor(lastLocalOffset, first.doc);
// Keep their changes in the marks (after their offset)
last.marks.some(function (array, i) {
var p = parseMark(array);
// End of the mark before offset? ignore
if (p.endLine < lastAuthPos.line) { return; }
// Take everything from the first mark ending after the pos
if (p.endLine > lastAuthPos.line || p.endCh >= lastAuthPos.ch) {
toKeep = last.marks.slice(i);
last.marks.splice(i);
return true;
}
});
// Keep my marks (based on currentDoc) before their changes
first.marks.some(function (array, i) {
var p = parseMark(array);
// End of the mark before offset? ignore
if (p.endLine < lastLocalPos.line) { return; }
// Take everything from the first mark ending after the pos
if (p.endLine > lastLocalPos.line || p.endCh >= lastLocalPos.ch) {
first.marks.splice(i);
return true;
}
});
}
// If we still have markers in "first", store the last one so that we can "join"
// everything at the end
if (first.marks.length) {
var toJoinMark = first.marks[first.marks.length - 1].slice();
toJoin = parseMark(toJoinMark);
}
// Add the new markers to the result
Array.prototype.unshift.apply(toKeepEnd, toKeep);
debug('warn', toJoin);
debug('warn', toKeep);
debug('warn', toKeepEnd);
// Fix their offset: compute added lines and added characters on the last line
// using the chainpad operation data (toInsert and toRemove)
var pos = SFCodeMirror.posToCursor(first.offset, content);
var removed = content.slice(first.offset, first.offset + first.toRemove).split('\n');
var added = first.toInsert.split('\n');
var posEndLine = pos.line + added.length - 1; // end line after op
var addLine = added.length - removed.length;
var addCh = added[added.length - 1].length - removed[removed.length - 1].length;
if (addLine > 0) { addCh -= pos.ch; }
if (addLine < 0) { addCh += pos.ch; }
toKeepEnd.forEach(function (array) {
// Push to correct lines
array[1] += addLine;
if (typeof(array[4]) !== "undefined") { array[3] += addLine; }
// If they have markers on my end line, push their "ch"
if (array[1] === posEndLine) {
array[2] += addCh;
// If they have no end line, it means end line === start line,
// so we also push their end offset
if (typeof(array[4]) === "undefined" && typeof(array[3]) !== "undefined") {
array[3] += addCh;
} else if (typeof(array[4]) !== "undefined" && array[3] === posEndLine) {
array[4] += addCh;
}
}
});
if (toKeep.length && toJoin && typeof(toJoin.endLine) !== "undefined"
&& typeof(toJoin.endCh) !== "undefined") {
// Make sure the marks are joined correctly:
// fix the start position of the marks to keep
// Note: we must preserve the same end for this mark if it was single line!
if (typeof(toKeepEnd[0][4]) === "undefined") { // Single line
toKeepEnd[0][4] = toKeepEnd[0][3] || (toKeepEnd[0][2]+1); // preserve end ch
toKeepEnd[0][3] = toKeepEnd[0][1]; // preserve end line
}
toKeepEnd[0][1] = toJoin.endLine;
toKeepEnd[0][2] = toJoin.endCh;
}
debug('log', 'Fixed');
debug('warn', toKeepEnd);
};
var checkMarks = function (Env, userDoc) {
var chainpad = Env.framework._.cpNfInner.chainpad;
var editor = Env.editor;
var CodeMirror = Env.CodeMirror;
Env.enabled = Boolean(userDoc.authormarks && userDoc.authormarks.marks);
setAuthorMarks(Env, userDoc.authormarks);
if (!Env.enabled) { return; }
debug('error', 'Check marks');
var authDoc = JSON.parse(chainpad.getAuthDoc() || '{}');
if (!authDoc.content || !userDoc.content) { return; }
var authPatch = chainpad.getAuthBlock();
if (authPatch.isFromMe) {
debug('log', 'Switch branch, from me');
debug('log', authDoc.content);
debug('log', authDoc.authormarks.marks);
debug('log', userDoc.content);
// We're switching to a different branch that was created by us.
// We can't trust localDoc anymore because it contains data from the other branch
// It means the only changes that we need to consider are ours.
// Diff between userDoc and authDoc to see what we changed
var _myOps = ChainPad.Diff.diff(authDoc.content, userDoc.content).reverse();
var authormarks = Util.clone(authDoc.authormarks);
_myOps.forEach(function (op) {
fixMarksFromOp(Env, op, authormarks.marks, authDoc.content);
});
authormarks.marks = authormarks.marks.filter(Boolean);
debug('log', 'Fixed marks');
debug('warn', authormarks.marks);
setAuthorMarks(Env, authormarks);
return;
}
var oldMarks = Env.oldMarks;
if (authDoc.content === userDoc.content) { return; } // No uncommitted work
if (!userDoc.authormarks || !Array.isArray(userDoc.authormarks.marks)) { return; }
debug('warn', 'Begin...');
var localDoc = CodeMirror.canonicalize(editor.getValue());
var commonParent = chainpad.getAuthBlock().getParent().getContent().doc;
var content = JSON.parse(commonParent || '{}').content || '';
var theirOps = ChainPad.Diff.diff(content, authDoc.content);
var myOps = ChainPad.Diff.diff(content, localDoc);
debug('log', theirOps);
debug('log', myOps);
if (!myOps.length || !theirOps.length) { return; }
// If I have uncommitted content when receiving a remote patch, all the operations
// placed after someone else's changes will create marker issues. We have to fix them
var sorted = [];
var myTotal = 0;
var theirTotal = 0;
var parseOp = function (me) {
return function (op) {
var size = (op.toInsert.length - op.toRemove);
sorted.push({
me: me,
offset: op.offset,
toInsert: op.toInsert,
toRemove: op.toRemove,
size: size,
marks: (me ? (oldMarks && oldMarks.marks)
: (authDoc.authormarks && authDoc.authormarks.marks)) || [],
doc: me ? localDoc : authDoc.content
});
if (me) { myTotal += size; }
else { theirTotal += size; }
};
};
myOps.forEach(parseOp(true));
theirOps.forEach(parseOp(false));
// Sort the operation in reverse order of offset
// If an operation from them has the same offset as an operation from me, put mine first
sorted.sort(function (a, b) {
if (a.offset === b.offset) {
return a.me ? -1 : 1;
}
return b.offset - a.offset;
});
debug('log', sorted);
// We start from the end so that we don't have to fix the offsets every time
var prev;
var toKeepEnd = [];
sorted.forEach(function (op) {
// Not the same author? fix!
if (prev) {
// Provide the new "totals"
prev.total = prev.me ? myTotal : theirTotal;
op.total = op.me ? myTotal : theirTotal;
// Fix the markers
fixMarks(Env, op, prev, content, toKeepEnd);
}
if (op.me) { myTotal -= op.size; }
else { theirTotal -= op.size; }
prev = op;
});
debug('log', toKeepEnd);
// We now have all the markers located after the first operation (ordered by offset).
// Prepend the markers placed before this operation
var first = sorted[sorted.length - 1];
if (first) { Array.prototype.unshift.apply(toKeepEnd, first.marks); }
// Commit our new markers
Env.authormarks.marks = toKeepEnd;
debug('warn', toKeepEnd);
debug('warn', '...End');
};
// Reset marks displayed in CodeMirror to the marks stored in Env
var setMarks = function (Env) {
// on remote update: remove all marks, add new marks if colors are enabled
Env.editor.getAllMarks().forEach(function (marker) {
if (marker.attributes && marker.attributes['data-type'] === 'authormark') {
marker.clear();
}
});
if (!Env.enabled) { return; }
debug('error', 'setMarks');
debug('log', Env.authormarks.marks);
var authormarks = Env.authormarks;
authormarks.marks.forEach(function (mark) {
var uid = mark[0];
if (uid !== -1 && (!authormarks.authors || !authormarks.authors[uid])) { return; }
var from = {};
var to = {};
from.line = mark[1];
from.ch = mark[2];
if (mark.length === 3) {
to.line = mark[1];
to.ch = mark[2]+1;
} else if (mark.length === 4) {
to.line = mark[1];
to.ch = mark[3];
} else if (mark.length === 5) {
to.line = mark[3];
to.ch = mark[4];
}
// Remove marks that are placed under this one
try {
Env.editor.findMarks(from, to).forEach(function (mark) {
if (!mark || !mark.attributes || mark.attributes['data-type'] !== 'authormark') { return; }
mark.clear();
});
} catch (e) {
console.warn(mark, JSON.stringify(authormarks.marks));
console.error(from, to);
console.error(e);
}
addMark(Env, from, to, uid);
});
};
var setMyData = function (Env) {
if (!Env.enabled) { return; }
var userData = Env.common.getMetadataMgr().getUserData();
var old = Env.authormarks.authors[Env.myAuthorId];
Env.authormarks.authors[Env.myAuthorId] = {
name: userData.name,
curvePublic: userData.curvePublic,
color: userData.color
};
if (!old || (old.name === userData.name && old.color === userData.color)) { return; }
return true;
};
var localChange = function (Env, change, cb) {
cb = cb || function () {};
if (!Env.enabled) { return void cb(); }
debug('error', 'Local change');
debug('log', change, true);
if (change.origin === "setValue") {
// If the content is changed from a remote patch, we call localChange
// in "onContentUpdate" directly
return;
}
if (change.text === undefined || ['+input', 'paste'].indexOf(change.origin) === -1) {
return void cb();
}
// Add a new author mark if text is added; marks from removed text are removed automatically.
// change.to is not always correct, so compute the real end position ourselves
var to_add = {
line: change.from.line + change.text.length-1,
};
if (change.text.length > 1) {
// Multiple lines => take the length of the text added to the last line
to_add.ch = change.text[change.text.length-1].length;
} else {
// Single line => use the "from" position and add the length of the text
to_add.ch = change.from.ch + change.text[change.text.length-1].length;
}
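// Hypothetical examples of the fix above: inserting ["foo"] at {line: 4, ch: 2} yields
// to_add = {line: 4, ch: 5}; inserting ["foo", "bar"] (two lines) at the same position yields
// to_add = {line: 5, ch: 3}, i.e. the length of the text added to the last line.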
// If my text is inside an existing mark:
// * if it's my mark, do nothing
// * if it's someone else's mark, break it
// We can only have one author mark at a given position, but there may be
// other marks (cursor selection, ...) at this position, so we use ".some"
var toSplit, abort;
Env.editor.findMarks(change.from, to_add).some(function (mark) {
if (!mark.attributes) { return; }
if (mark.attributes['data-type'] !== 'authormark') { return; }
if (mark.attributes['data-uid'] !== Env.myAuthorId) {
toSplit = {
mark: mark,
uid: mark.attributes['data-uid']
};
} else {
// This is our mark: abort to avoid making a new one
abort = true;
}
return true;
});
if (abort) { return void cb(); }
// Add my data to the doc if it's missing
if (!Env.authormarks.authors[Env.myAuthorId]) {
setMyData(Env);
}
if (toSplit && toSplit.mark && typeof(toSplit.uid) !== "undefined") {
// Break the other user's mark if needed
var _pos = toSplit.mark.find();
toSplit.mark.clear();
addMark(Env, _pos.from, change.from, toSplit.uid); // their mark, 1st part
addMark(Env, change.from, to_add, Env.myAuthorId); // my mark
addMark(Env, to_add, _pos.to, toSplit.uid); // their mark, 2nd part
} else {
// Add my mark
addMark(Env, change.from, to_add, Env.myAuthorId);
}
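// Sketch of the split above with made-up positions: if their mark spans ch 0..20 on one line
// and I type between ch 8 and ch 11, the result is three marks:
// theirs 0..8, mine 8..11, theirs 11..20.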
cb();
};
var setButton = function (Env, $button) {
var toggle = function () {
var tippy = $button[0] && $button[0]._tippy;
if (Env.opacity) {
Env.opacity = 0;
if (tippy) { tippy.title = Messages.cba_show; }
else { $button.attr('title', Messages.cba_show); }
$button.removeClass("cp-toolbar-button-active");
} else {
Env.opacity = MARK_OPACITY;
if (tippy) { tippy.title = Messages.cba_hide; }
else { $button.attr('title', Messages.cba_hide); }
$button.addClass("cp-toolbar-button-active");
}
};
toggle();
Env.$button = $button;
$button.click(function() {
toggle();
setMarks(Env);
});
};
var authorUid = function (existing) {
if (!Array.isArray(existing)) { existing = []; }
var n;
var i = 0;
while (!n || (existing.indexOf(n) !== -1 && i++ < 1000)) {
n = Math.floor(Math.random() * 1000000);
}
// If we can't find a valid number in 1000 iterations, use 0...
if (existing.indexOf(n) !== -1) { n = 0; }
return n;
};
var getAuthorId = function (Env) {
var existing = Object.keys(Env.authormarks.authors || {}).map(Number);
if (!Env.common.isLoggedIn()) { return authorUid(existing); }
var userData = Env.common.getMetadataMgr().getUserData();
var uid;
existing.some(function (id) {
var author = Env.authormarks.authors[id] || {};
if (author.curvePublic !== userData.curvePublic) { return; }
uid = Number(id);
return true;
});
return uid || authorUid(existing);
};
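// Illustration with made-up values: if authormarks.authors = { 433201: { curvePublic: 'Abc...' } }
// and my own curvePublic is 'Abc...', the id 433201 is reused; otherwise authorUid() picks a
// fresh random id that does not collide with the existing ones.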
var ready = function (Env) {
Env.ready = true;
Env.myAuthorId = getAuthorId(Env);
if (!Env.enabled) { return; }
if (Env.$button) { Env.$button.show(); }
if (!Env.authormarks.marks || !Env.authormarks.marks.length) {
Env.authormarks = Util.clone(DEFAULT);
}
setMarks(Env);
};
var getState = function (Env) {
return Boolean(Env.authormarks && Env.authormarks.marks);
};
var setState = function (Env, enabled) {
// If the state has changed in the pad, change the Env too
if (!Env.ready) { return; }
if (Env.enabled === enabled) { return; }
Env.enabled = enabled;
if (!Env.enabled) {
// Reset marks
Env.authormarks = {};
setMarks(Env);
if (Env.$button) { Env.$button.hide(); }
} else {
Env.myAuthorId = getAuthorId(Env);
// If it's a reset, add initial marker
if (!Env.authormarks.marks || !Env.authormarks.marks.length) {
Env.authormarks = Util.clone(DEFAULT);
setMarks(Env);
}
if (Env.$button) { Env.$button.show(); }
}
if (Env.ready) { Env.framework.localChange(); }
};
Markers.create = function (config) {
var Env = config;
Env.authormarks = {};
Env.enabled = false;
Env.myAuthorId = 0;
if (Env.devMode) {
debug = function (level, obj, logObject) {
var f = console.log;
if (typeof(console[level]) === "function") {
f = console[level];
}
if (logObject) { return void f(obj); }
// Assumed default branch: log a serialized copy when logObject is not set
f(JSON.stringify(obj));
};
}
var metadataMgr = Env.common.getMetadataMgr();
metadataMgr.onChange(function () {
// If the markers are disabled or if I haven't pushed content since the last reset,
// don't update my data
if (!Env.enabled || !Env.myAuthorId || !Env.authormarks.authors ||
!Env.authormarks.authors[Env.myAuthorId]) {
return;
}
// Update my data
var changed = setMyData(Env);
if (changed) {
setMarks(Env);
Env.framework.localChange();
}
});
var call = function (f) {
return function () {
try {
[].unshift.call(arguments, Env);
return f.apply(null, arguments);
} catch (e) {
console.error(e);
}
};
};
return {
addMark: call(addMark),
getAuthorMarks: call(getAuthorMarks),
updateAuthorMarks: call(updateAuthorMarks),
checkMarks: call(checkMarks),
setMarks: call(setMarks),
localChange: call(localChange),
ready: call(ready),
setButton: call(setButton),
getState: call(getState),
setState: call(setState),
};
};
return Markers;
});
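
The module above returns a thin wrapper API around its internal functions. A minimal usage sketch, assuming a CodeMirror `editor`, an sframe `common` object and a `framework` exposing `localChange` are already available (these are the properties `Markers.create` reads from its config; the wiring below is illustrative, not how the app itself hooks things up):

    var markers = Markers.create({
        common: common,        // provides the metadata manager and login state
        framework: framework,  // markers call framework.localChange() to push updates
        editor: editor,        // the CodeMirror instance holding the marks
        devMode: false         // set to true to enable the debug logging above
    });
    markers.ready();                             // once the pad is synced
    markers.setState(true);                      // enable author colors
    editor.on('change', function (cm, change) {
        markers.localChange(change);             // record my own edits as author marks
    });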

4
www/code/mermaid-new.css

@ -0,0 +1,4 @@
.sectionTitle, .titleText {
font-weight: bold;
}

59134
www/code/mermaid.js
File diff suppressed because it is too large

56
www/code/mermaid.min.js
File diff suppressed because it is too large

9
www/common/application_config_internal.js

@ -20,7 +20,7 @@ define(function() {
* users and these users will be redirected to the login page if they still try to access
* the app
*/
config.registeredOnlyTypes = ['file', 'contacts', 'oodoc', 'ooslide', 'sheet', 'notifications'];
config.registeredOnlyTypes = ['file', 'contacts', 'oodoc', 'ooslide', 'notifications'];
/* CryptPad is available in multiple languages, but only English and French are maintained
* by the developers. The other languages may be outdated, and any missing string for a language
@ -32,6 +32,13 @@ define(function() {
*/
//config.availableLanguages = ['en', 'fr', 'de'];
/* You can display a link to the imprint (legal notice) of your website in the static pages
* footer. To do so, you can either set the following value to `true` and create an imprint.html page
* in the `customize` directory, or set it to an absolute URL if your imprint page already exists.
*/
config.imprint = false;
// config.imprint = true;
// config.imprint = 'https://xwiki.com/en/company/legal-notice';
/* Cryptpad apps use a common API to display notifications to users
* by default, notifications are hidden after 5 seconds

176
www/common/common-hash.js

@ -15,6 +15,19 @@ var factory = function (Util, Crypto, Nacl) {
.decodeUTF8(JSON.stringify(list))));
};
Hash.generateSignPair = function () {
var ed = Nacl.sign.keyPair();
var makeSafe = function (key) {
return Crypto.b64RemoveSlashes(key).replace(/=+$/g, '');
};
return {
validateKey: Hash.encodeBase64(ed.publicKey),
signKey: Hash.encodeBase64(ed.secretKey),
safeValidateKey: makeSafe(Hash.encodeBase64(ed.publicKey)),
safeSignKey: makeSafe(Hash.encodeBase64(ed.secretKey)),
};
};
var getEditHashFromKeys = Hash.getEditHashFromKeys = function (secret) {
var version = secret.version;
var data = secret.keys;
@ -47,6 +60,23 @@ var factory = function (Util, Crypto, Nacl) {
return '/2/' + secret.type + '/view/' + Crypto.b64RemoveSlashes(data.viewKeyStr) + '/' + pass;
}
};
Hash.getHiddenHashFromKeys = function (type, secret, opts) {
opts = opts || {};
var canEdit = (secret.keys && secret.keys.editKeyStr) || secret.key;
var mode = (!opts.view && canEdit) ? 'edit/' : 'view/';
var pass = secret.password ? 'p/' : '';
if (secret.keys && secret.keys.fileKeyStr) { mode = ''; }
var hash = '/3/' + type + '/' + mode + secret.channel + '/' + pass;
var hashData = Hash.parseTypeHash(type, hash);
if (hashData && hashData.getHash) {
return hashData.getHash(opts || {});
}
return hash;
};
var getFileHashFromKeys = Hash.getFileHashFromKeys = function (secret) {
var version = secret.version;
var data = secret.keys;
@ -134,14 +164,41 @@ Version 1
/code/#/1/edit/3Ujt4F2Sjnjbis6CoYWpoQ/usn4+9CqVja8Q7RZOGTfRgqI
*/
var getOwnerKey = function (hashArr) {
var k;
// Check if we have an ownerKey for this pad
hashArr.some(function (data) {
if (data.length === 86) {
k = data;
return true;
}
});
return k;
};
var parseTypeHash = Hash.parseTypeHash = function (type, hash) {
if (!hash) { return; }
var options;
var options = [];
var parsed = {};
var hashArr = fixDuplicateSlashes(hash).split('/');
var addOptions = function () {
parsed.password = options.indexOf('p') !== -1;
parsed.present = options.indexOf('present') !== -1;
parsed.embed = options.indexOf('embed') !== -1;
parsed.ownerKey = getOwnerKey(options);
};
if (['media', 'file', 'user', 'invite'].indexOf(type) === -1) {
parsed.type = 'pad';
parsed.getHash = function () { return hash; };
parsed.getOptions = function () {
return {
embed: parsed.embed,
present: parsed.present,
ownerKey: parsed.ownerKey,
password: parsed.password
};
};
if (hash.slice(0,1) !== '/' && hash.length >= 56) { // Version 0
// Old hash
parsed.channel = hash.slice(0, 32);
@ -149,6 +206,18 @@ Version 1
parsed.version = 0;
return parsed;
}
// Version >= 1: more hash options
parsed.getHash = function (opts) {
var hash = hashArr.slice(0, 5).join('/') + '/';
var owner = typeof(opts.ownerKey) !== "undefined" ? opts.ownerKey : parsed.ownerKey;
if (owner) { hash += owner + '/'; }
if (parsed.password || opts.password) { hash += 'p/'; }
if (opts.embed) { hash += 'embed/'; }
if (opts.present) { hash += 'present/'; }
return hash;
};
if (hashArr[1] && hashArr[1] === '1') { // Version 1
parsed.version = 1;
parsed.mode = hashArr[2];
@ -156,15 +225,8 @@ Version 1
parsed.key = Crypto.b64AddSlashes(hashArr[4]);
options = hashArr.slice(5);
parsed.present = options.indexOf('present') !== -1;
parsed.embed = options.indexOf('embed') !== -1;
parsed.getHash = function (opts) {
var hash = hashArr.slice(0, 5).join('/') + '/';
if (opts.embed) { hash += 'embed/'; }
if (opts.present) { hash += 'present/'; }
return hash;
};
addOptions();
return parsed;
}
if (hashArr[1] && hashArr[1] === '2') { // Version 2
@ -174,17 +236,19 @@ Version 1
parsed.key = hashArr[4];
options = hashArr.slice(5);
parsed.password = options.indexOf('p') !== -1;
parsed.present = options.indexOf('present') !== -1;
parsed.embed = options.indexOf('embed') !== -1;
parsed.getHash = function (opts) {
var hash = hashArr.slice(0, 5).join('/') + '/';
if (parsed.password) { hash += 'p/'; }
if (opts.embed) { hash += 'embed/'; }
if (opts.present) { hash += 'present/'; }
return hash;
};
addOptions();
return parsed;
}
if (hashArr[1] && hashArr[1] === '3') { // Version 3: hidden hash
parsed.version = 3;
parsed.app = hashArr[2];
parsed.mode = hashArr[3];
parsed.channel = hashArr[4];
options = hashArr.slice(5);
addOptions();
return parsed;
}
return parsed;
@ -192,29 +256,54 @@ Version 1
parsed.getHash = function () { return hashArr.join('/'); };
if (['media', 'file'].indexOf(type) !== -1) {
parsed.type = 'file';
parsed.getOptions = function () {
return {
embed: parsed.embed,
present: parsed.present,
ownerKey: parsed.ownerKey,
password: parsed.password
};
};
parsed.getHash = function (opts) {
var hash = hashArr.slice(0, 4).join('/') + '/';
var owner = typeof(opts.ownerKey) !== "undefined" ? opts.ownerKey : parsed.ownerKey;
if (owner) { hash += owner + '/'; }
if (parsed.password || opts.password) { hash += 'p/'; }
if (opts.embed) { hash += 'embed/'; }
if (opts.present) { hash += 'present/'; }
return hash;
};
if (hashArr[1] && hashArr[1] === '1') {
parsed.version = 1;
parsed.channel = hashArr[2].replace(/-/g, '/');
parsed.key = hashArr[3].replace(/-/g, '/');
options = hashArr.slice(4);
addOptions();
return parsed;
}
if (hashArr[1] && hashArr[1] === '2') { // Version 2
parsed.version = 2;
parsed.app = hashArr[2];
parsed.key = hashArr[3];
options = hashArr.slice(4);
parsed.password = options.indexOf('p') !== -1;
parsed.present = options.indexOf('present') !== -1;
parsed.embed = options.indexOf('embed') !== -1;
parsed.getHash = function (opts) {
var hash = hashArr.slice(0, 4).join('/') + '/';
if (parsed.password) { hash += 'p/'; }
if (opts.embed) { hash += 'embed/'; }
if (opts.present) { hash += 'present/'; }
return hash;
};
addOptions();
return parsed;
}
if (hashArr[1] && hashArr[1] === '3') { // Version 3: hidden hash
parsed.version = 3;
parsed.app = hashArr[2];
parsed.channel = hashArr[3];
options = hashArr.slice(4);
addOptions();
return parsed;
}
return parsed;
@ -268,6 +357,10 @@ Version 1
url += '#' + hash;
return url;
};
ret.getOptions = function () {
if (!ret.hashData || !ret.hashData.getOptions) { return {}; }
return ret.hashData.getOptions();
};
if (!/^https*:\/\//.test(href)) {
idx = href.indexOf('/#');
@ -290,6 +383,14 @@ Version 1
return ret;
};
Hash.hashToHref = function (hash, type) {
return '/' + type + '/#' + hash;
};
Hash.hrefToHash = function (href) {
var parsed = Hash.parsePadUrl(href);
return parsed.hash;
};
Hash.getRelativeHref = function (href) {
if (!href) { return; }
if (href.indexOf('#') === -1) { return; }
@ -310,7 +411,7 @@ Version 1
secret.version = 2;
secret.type = type;
};
if (!secretHash && !window.location.hash) { //!/#/.test(window.location.href)) {
if (!secretHash) {
generate();
return secret;
} else {
@ -320,12 +421,7 @@ Version 1
if (!type) { throw new Error("getSecrets with a hash requires a type parameter"); }
parsed = parseTypeHash(type, secretHash);
hash = secretHash;
} else {
var pHref = parsePadUrl(window.location.href);
parsed = pHref.hashData;
hash = pHref.hash;
}
//var hash = secretHash || window.location.hash.slice(1);
if (hash.length === 0) {
generate();
return secret;
@ -461,8 +557,8 @@ Version 1
if (typeof(parsed.hashData.version) === "undefined") { return; }
// pads and files should have a base64 (or hex) key
if (parsed.hashData.type === 'pad' || parsed.hashData.type === 'file') {
if (!parsed.hashData.key) { return; }
if (!/^[a-zA-Z0-9+-/=]+$/.test(parsed.hashData.key)) { return; }
if (!parsed.hashData.key && !parsed.hashData.channel) { return; }
if (parsed.hashData.key && !/^[a-zA-Z0-9+-/=]+$/.test(parsed.hashData.key)) { return; }
}
}
return true;
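
For reference, the version-3 "hidden hash" handling added above stores only the app, the access mode and the channel, with no key material in the URL. A short sketch of what `parseTypeHash` returns for such a hash (the channel value is made up):

    // '/3/<app>/<mode>/<channel>/[p/]' is the shape produced by Hash.getHiddenHashFromKeys
    var parsed = Hash.parseTypeHash('pad', '/3/pad/edit/f0e1d2c3b4a5968778695a4b3c2d1e0f/p/');
    // parsed.version  === 3
    // parsed.app      === 'pad'
    // parsed.mode     === 'edit'
    // parsed.channel  === 'f0e1d2c3b4a5968778695a4b3c2d1e0f'
    // parsed.password === true   (the trailing 'p/' option)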

332
www/common/common-interface.js

@ -55,6 +55,10 @@ define([
return $('button.ok').last();
};
UI.removeModals = function () {
$('div.alertify').remove();
};
var listenForKeys = UI.listenForKeys = function (yes, no, el) {
var handler = function (e) {
e.stopPropagation();
@ -66,6 +70,7 @@ define([
if (typeof(yes) === 'function') { yes(e); }
break;
}
$(el || window).off('keydown', handler);
};
$(el || window).keydown(handler);
@ -161,6 +166,17 @@ define([
return h('p.msg', h('input', attrs));
};
dialog.textTypeInput = function (dropdown) {
var attrs = {
type: 'text',
'class': 'cp-text-type-input',
};
return h('p.msg.cp-alertify-type-container', h('div.cp-alertify-type', [
h('input', attrs),
dropdown // must be a "span"
]));
};
dialog.nav = function (content) {
return h('nav', content || [
dialog.cancelButton(),
@ -180,12 +196,14 @@ define([
]);
var $frame = $(frame);
frame.closeModal = function (cb) {
frame.closeModal = function () {}; // Prevent further calls
$frame.fadeOut(150, function () {
$frame.detach();
cb();
if (typeof(cb) === "function") { cb(); }
});
};
return $frame.click(function (e) {
$frame.find('.cp-dropdown-content').hide();
e.stopPropagation();
})[0];
};
@ -201,22 +219,29 @@ define([
var titles = [];
var active = 0;
tabs.forEach(function (tab, i) {
if (!tab.content || !tab.title) { return; }
if (!(tab.content || tab.disabled) || !tab.title) { return; }
var content = h('div.alertify-tabs-content', tab.content);
var title = h('span.alertify-tabs-title', tab.title);
var title = h('span.alertify-tabs-title'+ (tab.disabled ? '.disabled' : ''), tab.title);
if (tab.icon) {
var icon = h('i', {class: tab.icon});
$(title).prepend(' ').prepend(icon);
}
$(title).click(function () {
if (tab.disabled) { return; }
var old = tabs[active];
if (old.onHide) { old.onHide(); }
titles.forEach(function (t) { $(t).removeClass('alertify-tabs-active'); });
contents.forEach(function (c) { $(c).removeClass('alertify-tabs-content-active'); });
if (tab.onShow) {
tab.onShow();
}
$(title).addClass('alertify-tabs-active');
$(content).addClass('alertify-tabs-content-active');
active = i;
});
titles.push(title);
contents.push(content);
if (tab.active) { active = i; }
if (tab.active && !tab.disabled) { active = i; }
});
if (contents.length) {
$(contents[active]).addClass('alertify-tabs-content-active');
@ -356,12 +381,14 @@ define([
};
dialog.getButtons = function (buttons, onClose) {
if (!buttons) { return; }
if (!Array.isArray(buttons)) { return void console.error('Not an array'); }
if (!buttons.length) { return; }
var navs = [];
buttons.forEach(function (b) {
if (!b.name || !b.onClick) { return; }
var button = h('button', { tabindex: '1', 'class': b.className || '' }, b.name);
$(button).click(function () {
var todo = function () {
var noClose = b.onClick();
if (noClose) { return; }
var $modal = $(button).parents('.alertify').first();
@ -372,7 +399,17 @@ define([
}
});
}
});
};
if (b.confirm) {
UI.confirmButton(button, {
classes: 'danger',
divClasses: 'left'
}, todo);
} else {
$(button).click(function () {
todo();
});
}
if (b.keys && b.keys.length) { $(button).attr('data-keys', JSON.stringify(b.keys)); }
navs.push(button);
});
@ -423,6 +460,50 @@ define([
setTimeout(function () {
Notifier.notify();
});
return frame;
};
UI.createModal = function (cfg) {
var $body = cfg.$body || $('body');
var $blockContainer = cfg.id && $body.find('#'+cfg.id);
if (!$blockContainer || !$blockContainer.length) {
var id = '';
if (cfg.id) { id = '#'+cfg.id; }
$blockContainer = $(h('div.cp-modal-container'+id, {
tabindex: 1
}));
}
var deleted = false;
var hide = function () {
if (deleted) { return; }
$blockContainer.hide();
if (!cfg.id) {
deleted = true;
$blockContainer.remove();
}
if (cfg.onClose) { cfg.onClose(); }
};
$blockContainer.html('').appendTo($body);
var $block = $(h('div.cp-modal')).appendTo($blockContainer);
$(h('span.cp-modal-close.fa.fa-times', {
title: Messages.filePicker_close
})).click(hide).appendTo($block);
$body.click(hide);
$block.click(function (e) {
e.stopPropagation();
});
$body.keydown(function (e) {
if (e.which === 27) {
hide();
}
});
return {
$modal: $blockContainer,
show: function () {
$blockContainer.css('display', 'flex');
},
hide: hide
};
};
UI.alert = function (msg, cb, opt) {
@ -460,7 +541,7 @@ define([
stopListening(listener);
cb();
});
listener = listenForKeys(close, close);
listener = listenForKeys(close, close, frame);
var $ok = $(ok).click(close);
document.body.appendChild(frame);
@ -468,13 +549,19 @@ define([
$ok.focus();
Notifier.notify();
});
return {
element: frame,
delete: close
};
};
UI.prompt = function (msg, def, cb, opt, force) {
cb = cb || function () {};
opt = opt || {};
var inputBlock = opt.password ? UI.passwordInput() : dialog.textInput();
var inputBlock = opt.password ? UI.passwordInput() :
(opt.typeInput ? dialog.textTypeInput(opt.typeInput) : dialog.textInput());
var input = $(inputBlock).is('input') ? inputBlock : $(inputBlock).find('input')[0];
input.value = typeof(def) === 'string'? def: '';
@ -558,7 +645,7 @@ define([
$ok.click();
}, function () {
$cancel.click();
}, ok);
}, frame);
document.body.appendChild(frame);
setTimeout(function () {
@ -569,6 +656,70 @@ define([
}
});
};
UI.confirmButton = function (originalBtn, config, _cb) {
config = config || {};
var cb = Util.once(Util.mkAsync(_cb));
var classes = 'btn ' + (config.classes || 'btn-primary');
var button = h('button', {
"class": classes,
title: config.title || ''
}, Messages.areYouSure);
var $button = $(button);
var div = h('div', {
"class": config.classes || ''
});
var timer = h('div.cp-button-timer', div);
var content = h('div.cp-button-confirm', [
button,
timer
]);
if (config.divClasses) {
$(content).addClass(config.divClasses);
}
var to;
var done = function (res) {
if (res) { cb(res); }
clearTimeout(to);
$(content).detach();
$(originalBtn).show();
};
$button.click(function () {
done(true);
});
var TIMEOUT = 3000;
var INTERVAL = 10;
var i = 1;
var todo = function () {
var p = 100 * ((TIMEOUT - (i * INTERVAL)) / TIMEOUT);
if (i++ * INTERVAL >= TIMEOUT) {
done(false);
return;
}
$(div).css('width', p+'%');
to = setTimeout(todo, INTERVAL);
};
$(originalBtn).addClass('cp-button-confirm-placeholder').click(function () {
i = 1;
to = setTimeout(todo, INTERVAL);
$(originalBtn).hide().after(content);
});
return {
reset: function () {
done(false);
}
};
};
UI.proposal = function (content, cb) {
var buttons = [{
@ -1026,39 +1177,36 @@ define([
return radio;
};
var corner = {
queue: [],
state: false
};
UI.cornerPopup = function (text, actions, footer, opts) {
opts = opts || {};
var minimize = h('div.cp-corner-minimize.fa.fa-window-minimize');
var maximize = h('div.cp-corner-maximize.fa.fa-window-maximize');
var dontShowAgain = h('div.cp-corner-dontshow', [
h('span.fa.fa-times'),
Messages.dontShowAgain
]);
var popup = h('div.cp-corner-container', [
minimize,
maximize,
h('div.cp-corner-filler', { style: "width:110px;" }),
h('div.cp-corner-filler', { style: "width:80px;" }),
h('div.cp-corner-filler', { style: "width:60px;" }),
h('div.cp-corner-filler', { style: "width:40px;" }),
h('div.cp-corner-filler', { style: "width:20px;" }),
setHTML(h('div.cp-corner-text'), text),
h('div.cp-corner-actions', actions),
setHTML(h('div.cp-corner-footer'), footer)
setHTML(h('div.cp-corner-footer'), footer),
opts.dontShowAgain ? dontShowAgain : undefined
]);
var $popup = $(popup);
$(minimize).click(function () {
$popup.addClass('cp-minimized');
});
$(maximize).click(function () {
$popup.removeClass('cp-minimized');
});
if (opts.hidden) {
$popup.addClass('cp-minimized');
}
if (opts.big) {
$popup.addClass('cp-corner-big');
}
if (opts.alt) {
$popup.addClass('cp-corner-alt');
}
var hide = function () {
$popup.hide();
@ -1068,9 +1216,35 @@ define([
};
var deletePopup = function () {
$popup.remove();
if (!corner.queue.length) {
// Make sure no other popup is displayed in the next 5s
setTimeout(function () {
if (corner.queue.length) {
$('body').append(corner.queue.pop());
return;
}
corner.state = false;
}, 5000);
return;
}
setTimeout(function () {
$('body').append(corner.queue.pop());
}, 5000);
};
$('body').append(popup);
$(dontShowAgain).click(function () {
deletePopup();
if (typeof(opts.dontShowAgain) === "function") {
opts.dontShowAgain();
}
});
if (corner.state) {
corner.queue.push(popup);
} else {
corner.state = true;
$('body').append(popup);
}
return {
popup: popup,
@ -1080,5 +1254,109 @@ define([
};
};
UI.makeSpinner = function ($container) {
var $ok = $('<span>', {'class': 'fa fa-check', title: Messages.saved}).hide();
var $spinner = $('<span>', {'class': 'fa fa-spinner fa-pulse'}).hide();
var state = false;
var to;
var spin = function () {
clearTimeout(to);
state = true;
$ok.hide();
$spinner.show();
};
var hide = function () {
clearTimeout(to);
state = false;
$ok.hide();
$spinner.hide();
};
var done = function () {
clearTimeout(to);
state = false;
$ok.show();
$spinner.hide();
to = setTimeout(function () {
$ok.hide();
}, 500);
};
if ($container && $container.append) {
$container.append($ok);
$container.append($spinner);
}
return {
getState: function () { return state; },
ok: $ok[0],
spinner: $spinner[0],
spin: spin,
hide: hide,
done: done
};
};
UI.createContextMenu = function (menu) {
var $menu = $(menu).appendTo($('body'));
var display = function (e) {
$menu.css({ display: "block" });
var h = $menu.outerHeight();
var w = $menu.outerWidth();
var wH = window.innerHeight;
var wW = window.innerWidth;
if (h > wH) {
$menu.css({
top: '0px',
bottom: ''
});
} else if (e.pageY + h <= wH) {
$menu.css({
top: e.pageY+'px',
bottom: ''
});
} else {
$menu.css({
bottom: '0px',
top: ''
});
}
if(w > wW) {
$menu.css({
left: '0px',
right: ''
});
} else if (e.pageX + w <= wW) {
$menu.css({
left: e.pageX+'px',
right: ''
});
} else {
$menu.css({
left: '',
right: '0px',
});
}
};
var hide = function () {
$menu.hide();
};
var remove = function () {
$menu.remove();
};
$('body').click(hide);
return {
menu: menu,
show: display,
hide: hide,
remove: remove
};
};
return UI;
});
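
The `UI.confirmButton` helper added above swaps a button for a timed "Are you sure?" button and only fires the callback if it is clicked within the 3-second window. A minimal usage sketch (the button, its label and the callback are illustrative; `h` is the hyperscript helper already used in this file):

    var deleteBtn = h('button.btn.btn-danger', 'Delete');
    document.body.appendChild(deleteBtn);
    UI.confirmButton(deleteBtn, { classes: 'danger' }, function () {
        // Reached only if the confirmation button was clicked before the timer ran out
        console.log('confirmed, actually deleting');
    });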

12
www/common/common-messaging.js

@ -53,10 +53,18 @@ define([
return list;
};
Msg.declineFriendRequest = function (store, data, cb) {
store.mailbox.sendTo('DECLINE_FRIEND_REQUEST', {}, {
channel: data.notifications,
curvePublic: data.curvePublic
}, function (obj) {
cb(obj);
});
};
Msg.acceptFriendRequest = function (store, data, cb) {
var friend = getFriend(store.proxy, data.curvePublic) || {};
var myData = createData(store.proxy, friend.channel || data.channel);
store.mailbox.sendTo('ACCEPT_FRIEND_REQUEST', myData, {
store.mailbox.sendTo('ACCEPT_FRIEND_REQUEST', { user: myData }, {
channel: data.notifications,
curvePublic: data.curvePublic
}, function (obj) {
@ -110,7 +118,7 @@ define([
var proxy = store.proxy;
var friend = proxy.friends[curvePublic];
if (!friend) { return void cb({error: 'ENOENT'}); }
if (!friend.notifications || !friend.channel) { return void cb({error: 'EINVAL'}); }
if (!friend.notifications) { return void cb({error: 'EINVAL'}); }
store.mailbox.sendTo('UNFRIEND', {
curvePublic: proxy.curvePublic

1916
www/common/common-ui-elements.js
File diff suppressed because it is too large

74
www/common/common-util.js

@ -34,6 +34,9 @@
};
Util.mkAsync = function (f) {
if (typeof(f) !== 'function') {
throw new Error('EXPECTED_FUNCTION');
}
return function () {
var args = Array.prototype.slice.call(arguments);
setTimeout(function () {
@ -65,10 +68,29 @@
};
};
Util.response = function () {
Util.mkTimeout = function (_f, ms) {
ms = ms || 0;
var f = Util.once(_f);
var timeout = setTimeout(function () {
f('TIMEOUT');
}, ms);
return Util.both(f, function () {
clearTimeout(timeout);
});
};
Util.response = function (errorHandler) {
var pending = {};
var timeouts = {};
if (typeof(errorHandler) !== 'function') {
errorHandler = function (label) {
throw new Error(label);
};
}
var clear = function (id) {
clearTimeout(timeouts[id]);
delete timeouts[id];
@ -76,8 +98,8 @@
};
var expect = function (id, fn, ms) {
if (typeof(id) !== 'string') { throw new Error("EXPECTED_STRING"); }
if (typeof(fn) !== 'function') { throw new Error("EXPECTED_CALLBACK"); }
if (typeof(id) !== 'string') { errorHandler('EXPECTED_STRING'); }
if (typeof(fn) !== 'function') { errorHandler('EXPECTED_CALLBACK'); }
pending[id] = fn;
if (typeof(ms) === 'number' && ms) {
timeouts[id] = setTimeout(function () {
@ -89,8 +111,21 @@
var handle = function (id, args) {
var fn = pending[id];
if (typeof(fn) !== 'function') { throw new Error("MISSING_CALLBACK"); }
pending[id].apply(null, Array.isArray(args)? args : [args]);
if (typeof(fn) !== 'function') {
errorHandler("MISSING_CALLBACK", {
id: id,
args: args,
});
}
try {
pending[id].apply(null, Array.isArray(args)? args : [args]);
} catch (err) {
errorHandler('HANDLER_ERROR', {
error: err,
id: id,
args: args,
});
}
clear(id);
};
@ -99,6 +134,9 @@
expected: function (id) {
return Boolean(pending[id]);
},
expectation: function (id) {
return pending[id];
},
expect: expect,
handle: handle,
};
@ -224,6 +262,7 @@
else if (bytes >= oneMegabyte) { return 'MB'; }
};
// given a path, asynchronously return an arraybuffer
Util.fetch = function (src, cb, progress) {
var CB = Util.once(cb);
@ -268,8 +307,8 @@
Util.throttle = function (f, ms) {
var to;
var g = function () {
window.clearTimeout(to);
to = window.setTimeout(Util.bake(f, Util.slice(arguments)), ms);
clearTimeout(to);
to = setTimeout(Util.bake(f, Util.slice(arguments)), ms);
};
return g;
};
@ -428,6 +467,27 @@
return false;
};
var emoji_patt = /([\uD800-\uDBFF][\uDC00-\uDFFF])/;
var isEmoji = function (str) {
return emoji_patt.test(str);
};
var emojiStringToArray = function (str) {
var split = str.split(emoji_patt);
var arr = [];
for (var i=0; i<split.length; i++) {
var char = split[i];
if (char !== "") {
arr.push(char);
}
}
return arr;
};
Util.getFirstCharacter = function (str) {
if (!str || !str.trim()) { return '?'; }
var emojis = emojiStringToArray(str);
return isEmoji(emojis[0])? emojis[0]: str[0];
};
if (typeof(module) !== 'undefined' && module.exports) {
module.exports = Util;
} else if ((typeof(define) !== 'undefined' && define !== null) && (define.amd !== null)) {
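
Two of the helpers added above, shown in a short hypothetical sketch: `Util.mkTimeout` wraps a callback so that it fires at most once and receives 'TIMEOUT' as its first argument if the timer wins, and `Util.getFirstCharacter` returns a display character in an emoji-aware way (`someAsyncCall` below is a made-up async API):

    var cb = Util.mkTimeout(function (err, data) {
        if (err === 'TIMEOUT') { return void console.error('gave up after 5 seconds'); }
        console.log('got', data);
    }, 5000);
    someAsyncCall(cb);   // calling cb before the deadline also clears the timeout

    Util.getFirstCharacter('🙂 hello');  // => '🙂' (leading emoji)
    Util.getFirstCharacter('hello');     // => 'h'
    Util.getFirstCharacter('   ');       // => '?' (blank input)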

273
www/common/cryptpad-common.js

@ -6,6 +6,7 @@ define([
'/common/common-messaging.js',
'/common/common-constants.js',
'/common/common-feedback.js',
'/common/visible.js',
'/common/userObject.js',
'/common/outer/local-store.js',
'/common/outer/worker-channel.js',
@ -14,7 +15,7 @@ define([
'/customize/application_config.js',
'/bower_components/nthen/index.js',
], function (Config, Messages, Util, Hash,
Messaging, Constants, Feedback, UserObject, LocalStore, Channel, Block,
Messaging, Constants, Feedback, Visible, UserObject, LocalStore, Channel, Block,
AppConfig, Nthen) {
/* This file exposes functionality which is specific to Cryptpad, but not to
@ -49,6 +50,12 @@ define([
account: {},
};
// Store the href in memory
// This is a placeholder value overridden in common.ready from sframe-common-outer
var currentPad = common.currentPad = {
href: window.location.href
};
// COMMON
common.getLanguage = function () {
return Messages._languageUsed;
@ -374,7 +381,7 @@ define([
common.getMetadata = function (cb) {
var parsed = Hash.parsePadUrl(window.location.href);
var parsed = Hash.parsePadUrl(currentPad.href);
postMessage("GET_METADATA", parsed && parsed.type, function (obj) {
if (obj && obj.error) { return void cb(obj.error); }
cb(null, obj);
@ -394,7 +401,7 @@ define([
common.setPadAttribute = function (attr, value, cb, href) {
cb = cb || function () {};
href = Hash.getRelativeHref(href || window.location.href);
href = Hash.getRelativeHref(href || currentPad.href);
postMessage("SET_PAD_ATTRIBUTE", {
href: href,
attr: attr,
@ -405,7 +412,7 @@ define([
});
};
common.getPadAttribute = function (attr, cb, href) {
href = Hash.getRelativeHref(href || window.location.href);
href = Hash.getRelativeHref(href || currentPad.href);
if (!href) {
return void cb('E404');
}
@ -505,7 +512,7 @@ define([
};
common.saveAsTemplate = function (Cryptput, data, cb) {
var p = Hash.parsePadUrl(window.location.href);
var p = Hash.parsePadUrl(currentPad.href);
if (!p.type) { return; }
// PPP: password for the new template?
var hash = Hash.createRandomHash(p.type);
@ -537,13 +544,35 @@ define([
});
};
var fixPadMetadata = function (parsed, copy) {
var meta;
if (Array.isArray(parsed) && typeof(parsed[3]) === "object") {
meta = parsed[3].metadata; // pad
} else if (parsed.info) {
meta = parsed.info; // poll
} else {
meta = parsed.metadata;
}
if (typeof(meta) === "object") {
meta.defaultTitle = meta.title || meta.defaultTitle;
if (copy) {
meta.defaultTitle = Messages._getKey('copy_title', [meta.defaultTitle]);
}
meta.title = "";
delete meta.users;
delete meta.chat2;
delete meta.chat;
delete meta.cursor;
}
};
common.useTemplate = function (data, Crypt, cb, optsPut) {
// opts is used to override options for chainpad-netflux in cryptput
// it allows us to add owners and expiration time if it is a new file
var href = data.href;
var parsed = Hash.parsePadUrl(href);
var parsed2 = Hash.parsePadUrl(window.location.href);
var parsed2 = Hash.parsePadUrl(currentPad.href);
if(!parsed) { throw new Error("Cannot get template hash"); }
postMessage("INCREMENT_TEMPLATE_USE", href);
@ -570,24 +599,7 @@ define([
try {
// Try to fix the title before importing the template
var parsed = JSON.parse(val);
var meta;
if (Array.isArray(parsed) && typeof(parsed[3]) === "object") {
meta = parsed[3].metadata; // pad
} else if (parsed.info) {
meta = parsed.info; // poll
} else {
meta = parsed.metadata;
}
if (typeof(meta) === "object") {
meta.defaultTitle = meta.title || meta.defaultTitle;
meta.title = "";
delete meta.users;
delete meta.chat2;
delete meta.chat;
delete meta.cursor;
if (data.chat) { meta.chat2 = data.chat; }
if (data.cursor) { meta.cursor = data.cursor; }
}
fixPadMetadata(parsed);
val = JSON.stringify(parsed);
} catch (e) {
console.log("Can't fix template title", e);
@ -601,66 +613,104 @@ define([
var fileHost = Config.fileHost || window.location.origin;
var data = common.fromFileData;
var parsed = Hash.parsePadUrl(data.href);
var parsed2 = Hash.parsePadUrl(window.location.href);
var hash = parsed.hash;
var name = data.title;
var secret = Hash.getSecrets('file', hash, data.password);
var src = fileHost + Hash.getBlobPathFromHex(secret.channel);
var key = secret.keys && secret.keys.cryptKey;
var u8;
var res;
var mode;
var parsed2 = Hash.parsePadUrl(currentPad.href);
if (parsed2.type === 'poll') { optsPut.initialState = '{}'; }
var val;
Nthen(function(waitFor) {
Util.fetch(src, waitFor(function (err, _u8) {
if (err) { return void waitFor.abort(); }
u8 = _u8;
}));
}).nThen(function (waitFor) {
require(["/file/file-crypto.js"], waitFor(function (FileCrypto) {
FileCrypto.decrypt(u8, key, waitFor(function (err, _res) {
if (err || !_res.content) { return void waitFor.abort(); }
res = _res;
}));
}));
}).nThen(function (waitFor) {
var ext = Util.parseFilename(data.title).ext;
if (!ext) {
mode = "text";
Nthen(function(_waitFor) {
// If pad, use cryptget
if (parsed.hashData && parsed.hashData.type === 'pad') {
var optsGet = {
password: data.password,
initialState: parsed.type === 'poll' ? '{}' : undefined
};
Crypt.get(parsed.hash, _waitFor(function (err, _val) {
if (err) {
_waitFor.abort();
return void cb();
}
try {
val = JSON.parse(_val);
fixPadMetadata(val, true);
} catch (e) {
_waitFor.abort();
return void cb();
}
}), optsGet);
return;
}
require(["/common/modes.js"], waitFor(function (Modes) {
Modes.list.some(function (fType) {
if (fType.ext === ext) {
mode = fType.mode;
return true;
var name = data.title;
var secret = Hash.getSecrets(parsed.type, parsed.hash, data.password);
var src = fileHost + Hash.getBlobPathFromHex(secret.channel);
var key = secret.keys && secret.keys.cryptKey;
var u8;
var res;
var mode;
// Otherwise, it's a text blob "open in code": get blob data & convert format
Nthen(function (waitFor) {
Util.fetch(src, waitFor(function (err, _u8) {
if (err) {
_waitFor.abort();
return void waitFor.abort();
}
});
}));
}).nThen(function (waitFor) {
var reader = new FileReader();
reader.addEventListener('loadend', waitFor(function (e) {
val = {
content: e.srcElement.result,
highlightMode: mode,
metadata: {
defaultTitle: name,
title: name,
type: "code",
},
};
}));
reader.readAsText(res.content);
u8 = _u8;
}));
}).nThen(function (waitFor) {
require(["/file/file-crypto.js"], waitFor(function (FileCrypto) {
FileCrypto.decrypt(u8, key, waitFor(function (err, _res) {
if (err || !_res.content) {
_waitFor.abort();
return void waitFor.abort();
}
res = _res;
}));
}));
}).nThen(function (waitFor) {
var ext = Util.parseFilename(data.title).ext;
if (!ext) {
mode = "text";
return;
}
require(["/common/modes.js"], waitFor(function (Modes) {
Modes.list.some(function (fType) {
if (fType.ext === ext) {
mode = fType.mode;
return true;
}
});
}));
}).nThen(function (waitFor) {
var reader = new FileReader();
reader.addEventListener('loadend', waitFor(function (e) {
val = {
content: e.srcElement.result,
highlightMode: mode,
metadata: {
defaultTitle: name,
title: name,
type: "code",
},
};
}));
reader.readAsText(res.content);
}).nThen(_waitFor());
}).nThen(function () {
Crypt.put(parsed2.hash, JSON.stringify(val), cb, optsPut);
Crypt.put(parsed2.hash, JSON.stringify(val), function () {
cb();
Crypt.get(parsed2.hash, function (err, val) {
console.warn(val);
});
}, optsPut);
});
};
// Forget button
common.moveToTrash = function (cb, href) {
href = href || window.location.href;
href = href || currentPad.href;
postMessage("MOVE_TO_TRASH", { href: href }, cb);
};
@ -668,7 +718,7 @@ define([
common.setPadTitle = function (data, cb) {
if (!data || typeof (data) !== "object") { return cb ('Data is not an object'); }
var href = data.href || window.location.href;
var href = data.href || currentPad.href;
var parsed = Hash.parsePadUrl(href);
if (!parsed.hash) { return cb ('Invalid hash'); }
data.href = parsed.getUrl({present: parsed.present});
@ -698,7 +748,7 @@ define([
if (obj.error !== "EAUTH") { console.log("unable to set pad title"); }
return void cb(obj.error);
}
cb();
cb(null, obj);
});
};
@ -755,6 +805,13 @@ define([
cb(void 0, data);
});
};
// Get data about a given channel: use with hidden hashes
common.getPadDataFromChannel = function (obj, cb) {
if (!obj || !obj.channel) { return void cb('EINVAL'); }
postMessage("GET_PAD_DATA_FROM_CHANNEL", obj, function (data) {
cb(void 0, data);
});
};
// Admin
@ -822,7 +879,8 @@ define([
postMessage("LEAVE_PAD", data, cb);
};
pad.sendPadMsg = function (data, cb) {
postMessage("SEND_PAD_MSG", data, cb);
// -1 ==> no timeout, we may receive the callback only when we reconnect
postMessage("SEND_PAD_MSG", data, cb, { timeout: -1 });
};
pad.onReadyEvent = Util.mkEvent();
pad.onMessageEvent = Util.mkEvent();
@ -832,6 +890,7 @@ define([
pad.onConnectEvent = Util.mkEvent();
pad.onErrorEvent = Util.mkEvent();
pad.onMetadataEvent = Util.mkEvent();
pad.onChannelDeleted = Util.mkEvent();
pad.requestAccess = function (data, cb) {
postMessage("REQUEST_PAD_ACCESS", data, cb);
@ -847,6 +906,10 @@ define([
postMessage('GET_PAD_METADATA', data, cb);
};
common.burnPad = function (data) {
postMessage('BURN_PAD', data);
};
common.changePadPassword = function (Crypt, Crypto, data, cb) {
var href = data.href;
var newPassword = data.password;
@ -1019,11 +1082,12 @@ define([
}, waitFor());
}
}).nThen(function () {
common.drive.onChange.fire({path: ['drive', Constants.storageKey]});
cb({
warning: warning,
hash: newHash,
href: newHref,
roHref: newRoHref
roHref: newRoHref,
});
});
};
@ -1152,6 +1216,7 @@ define([
channel: newSecret.channel
}, waitFor());
}).nThen(function () {
common.drive.onChange.fire({path: ['drive', Constants.storageKey]});
cb({
warning: warning,
hash: newHash,
@ -1386,6 +1451,7 @@ define([
}, waitFor());
}));
}).nThen(function () {
common.drive.onChange.fire({path: ['drive', Constants.storageKey]});
cb({
warning: warning,
hash: newHash,
@ -1604,7 +1670,7 @@ define([
hashes = Hash.getHashes(secret);
return void cb(null, hashes);
}
var parsed = Hash.parsePadUrl(window.location.href);
var parsed = Hash.parsePadUrl(currentPad.href);
if (!parsed.type || !parsed.hashData) { return void cb('E_INVALID_HREF'); }
hashes = Hash.getHashes(secret);
@ -1642,14 +1708,17 @@ define([
var ver = arr[1];
if (!ver) { return; }
var verArr = ver.split('.');
verArr[2] = 0;
//verArr[2] = 0;
if (verArr.length !== 3) { return; }
var stored = currentVersion || '0.0.0';
var storedArr = stored.split('.');
storedArr[2] = 0;
var shouldUpdate = parseInt(verArr[0]) > parseInt(storedArr[0]) ||
//storedArr[2] = 0;
var shouldUpdate = JSON.stringify(verArr) !== JSON.stringify(storedArr);
/*
var shouldUpdate = parseInt(verArr[0]) !== parseInt(storedArr[0]) ||
(parseInt(verArr[0]) === parseInt(storedArr[0]) &&
parseInt(verArr[1]) > parseInt(storedArr[1]));
parseInt(verArr[1]) !== parseInt(storedArr[1]));
*/
if (!shouldUpdate) { return; }
currentVersion = ver;
localStorage[CRYPTPAD_VERSION] = ver;
@ -1675,7 +1744,7 @@ define([
LocalStore.logout();
// redirect them to log in, and come back when they're done.
sessionStorage.redirectTo = window.location.href;
sessionStorage.redirectTo = currentPad.href;
window.location.href = '/login/';
};
@ -1685,19 +1754,37 @@ define([
cb();
};
var lastPing = +new Date();
var onPing = function (data, cb) {
lastPing = +new Date();
cb();
};
var timeout = false;
common.onTimeoutEvent = Util.mkEvent();
var onTimeout = function () {
var onTimeout = function (fromOuter) {
var key = fromOuter ? "TIMEOUT_OUTER" : "TIMEOUT_KICK";
Feedback.send(key, true);
timeout = true;
common.onNetworkDisconnect.fire();
common.padRpc.onDisconnectEvent.fire();
common.onTimeoutEvent.fire();
};
Visible.onChange(function (visible) {
if (!visible) { return; }
var now = +new Date();
// If the last ping is more than 2 minutes old, ping the worker
if (now - lastPing > (2 * 60 * 1000)) {
var to = setTimeout(function () {
onTimeout(true);
}, 5000);
postMessage('PING', null, function () {
clearTimeout(to);
});
}
});
var queries = {
PING: onPing,
TIMEOUT: onTimeout,
@ -1736,6 +1823,7 @@ define([
PAD_CONNECT: common.padRpc.onConnectEvent.fire,
PAD_ERROR: common.padRpc.onErrorEvent.fire,
PAD_METADATA: common.padRpc.onMetadataEvent.fire,
CHANNEL_DELETED: common.padRpc.onChannelDeleted.fire,
// Drive
DRIVE_LOG: common.drive.onLog.fire,
DRIVE_CHANGE: common.drive.onChange.fire,
@ -1776,6 +1864,11 @@ define([
return function (f, rdyCfg) {
rdyCfg = rdyCfg || {};
if (rdyCfg.currentPad) {
currentPad = common.currentPad = rdyCfg.currentPad;
}
if (initialized) {
return void setTimeout(function () { f(void 0, env); });
}
@ -1874,11 +1967,16 @@ define([
anonHash: LocalStore.getFSHash(),
localToken: tryParsing(localStorage.getItem(Constants.tokenKey)), // TODO move this to LocalStore ?
language: common.getLanguage(),
driveEvents: rdyCfg.driveEvents // Boolean
driveEvents: true //rdyCfg.driveEvents // Boolean
};
// if a pad is created from a file
if (sessionStorage[Constants.newPadFileData]) {
common.fromFileData = JSON.parse(sessionStorage[Constants.newPadFileData]);
var _parsed1 = Hash.parsePadUrl(common.fromFileData.href);
var _parsed2 = Hash.parsePadUrl(window.location.href);
if (_parsed1.hashData.type === 'pad') {
if (_parsed1.type !== _parsed2.type) { delete common.fromFileData; }
}
delete sessionStorage[Constants.newPadFileData];
}
@ -2097,7 +2195,10 @@ define([
var parsedNew = Hash.parsePadUrl(newHref);
if (parsedOld.hashData && parsedNew.hashData &&
parsedOld.getUrl() !== parsedNew.getUrl()) {
if (!parsedOld.hashData.key) { oldHref = newHref; return; }
if (parsedOld.hashData.version !== 3 && !parsedOld.hashData.key) {
oldHref = newHref;
return;
}
// If different, reload
document.location.reload();
return;
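
The relaxed version check above now treats any difference between the stored and announced version strings as an update, so patch releases also trigger a reload. Illustration with made-up version strings:

    JSON.stringify('3.16.1'.split('.')) !== JSON.stringify('3.16.0'.split('.'));  // true: patch bump now counts
    JSON.stringify('3.16.0'.split('.')) !== JSON.stringify('3.16.0'.split('.'));  // false: same version, no reload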

Some files were not shown because too many files changed in this diff
